diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 6728ff8914..f8469ae5a3 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,8 +1,7 @@ { - "image": "mcr.microsoft.com/devcontainers/go:1-bookworm", + "image": "mcr.microsoft.com/devcontainers/go:1.24-bookworm", "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {}, - "ghcr.io/rio/features/k3d:1": {}, "ghcr.io/mpriscella/features/kind:1": {}, "ghcr.io/rjfmachado/devcontainer-features/cloud-native:1": { "kubectl": "latest", @@ -13,11 +12,11 @@ "cilium": "none" }, "ghcr.io/guiyomh/features/golangci-lint:0": {}, - "ghcr.io/devcontainers-contrib/features/kubectx-kubens:1": {}, + "ghcr.io/devcontainers-extra/features/kubectx-kubens:1": {}, "ghcr.io/dhoeric/features/stern:1": {} }, - // Needed by kind and k3s to enable kube-proxy's ipvs mode + // Needed by kind to enable kube-proxy's ipvs mode "mounts":["type=bind,source=/lib/modules,target=/lib/modules"], // Enable kubectl short alias with completion diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 0506b9a579..6dbed05d9e 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -48,11 +48,14 @@ body: label: Version description: What is the version of CloudNativePG you are running? options: - - 1.24.0 - - 1.23.4 - - trunk (main) - - older in 1.23.x - - older minor (unsupported) + - "1.27 (latest patch)" + - "1.26 (latest patch)" + - "1.25 (latest patch)" + - "trunk (main)" + - "older in 1.27.x" + - "older in 1.26.x" + - "older in 1.25.x" + - "older minor (unsupported)" validations: required: true - type: dropdown @@ -60,12 +63,13 @@ body: attributes: label: What version of Kubernetes are you using? options: - - 1.31 - - 1.30 - - 1.29 - - 1.28 - - 1.27 (unsupported) - - other (unsupported) + - "1.34" + - "1.33" + - "1.32" + - "1.31" + - "1.30" + - "1.29" + - "other (unsupported)" validations: required: true - type: dropdown diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f522006c44..237e748a6b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -4,5 +4,5 @@ contact_links: url: https://github.com/cloudnative-pg/cloudnative-pg/discussions about: Please ask and answer questions here. - name: Slack chat - url: https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g + url: https://github.com/cloudnative-pg/cloudnative-pg?tab=readme-ov-file#communications about: Please join the slack channel and interact with our community diff --git a/.github/ISSUE_TEMPLATE/release-notes.yml b/.github/ISSUE_TEMPLATE/release-notes.yml new file mode 100644 index 0000000000..8b40b9e731 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/release-notes.yml @@ -0,0 +1,48 @@ +name: Release Notes +description: Release notes for a new version of CloudNativePG +title: "[Release Notes]: CloudNativePG 1.XX.Y and 1.XX-1.Z" +labels: ["triage", "documentation"] +projects: ["cloudnative-pg/cloudnative-pg"] +assignees: + - gbartolini +body: + - type: markdown + attributes: + value: | + Make sure that the correct versions are reported in the title of the ticket. + - type: checkboxes + id: search + attributes: + label: Is there an existing issue already for this task? + description: Before you submit a new issue, make sure you have searched if a similar one already exists + options: + - label: I have searched for an existing issue, and could not find anything. I believe this is a new request. 
+ required: true + - type: dropdown + id: minor + attributes: + label: Is this a new minor release? + description: Is this a new minor release for CloudNativePG? If so, make sure you check the `contribute/release-notes-template.md` file. + options: + - "No" + - "Yes" + validations: + required: true + - type: dropdown + id: preview + attributes: + label: Is this a preview release? + description: Is this a preview release for CloudNativePG? If so, make sure you add `-RC1` to the version and update the `preview_version.md` file. + options: + - "No" + - "Yes" + validations: + required: true + - type: checkboxes + id: terms + attributes: + label: Code of Conduct + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/cloudnative-pg/governance/blob/main/CODE_OF_CONDUCT.md) + options: + - label: I agree to follow this project's Code of Conduct + required: true diff --git a/.github/aks_versions.json b/.github/aks_versions.json index b6064884d3..52919c25a3 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,6 +1,6 @@ [ - "1.30.3", - "1.29.7", - "1.28.9", - "1.27.9" + "1.33.2", + "1.32.6", + "1.31.9", + "1.30.9" ] diff --git a/.github/e2e-matrix-generator.py b/.github/e2e-matrix-generator.py index 1270ef907a..6a2ef3ff64 100644 --- a/.github/e2e-matrix-generator.py +++ b/.github/e2e-matrix-generator.py @@ -1,5 +1,6 @@ # -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# import argparse import json diff --git a/.github/eks_versions.json b/.github/eks_versions.json index 9fbe6428a3..f452ade413 100644 --- a/.github/eks_versions.json +++ b/.github/eks_versions.json @@ -1,6 +1,5 @@ [ - "1.30", - "1.29", - "1.28", - "1.27" + "1.33", + "1.32", + "1.31" ] diff --git a/.github/generate-test-artifacts.py b/.github/generate-test-artifacts.py index 64f2579999..c81268ec9c 100644 --- a/.github/generate-test-artifacts.py +++ b/.github/generate-test-artifacts.py @@ -1,5 +1,6 @@ # -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +# SPDX-License-Identifier: Apache-2.0 +# import argparse import json diff --git a/.github/gke_versions.json b/.github/gke_versions.json index 9fbe6428a3..1d12057c20 100644 --- a/.github/gke_versions.json +++ b/.github/gke_versions.json @@ -1,6 +1,6 @@ [ - "1.30", - "1.29", - "1.28", - "1.27" + "1.33", + "1.32", + "1.31", + "1.30" ] diff --git a/.github/k8s_versions_scope.json b/.github/k8s_versions_scope.json index 4842004177..b3cf9b61ec 100644 --- a/.github/k8s_versions_scope.json +++ b/.github/k8s_versions_scope.json @@ -1,10 +1,10 @@ { "e2e_test": { - "KIND": {"min": "1.27", "max": ""}, - "AKS": {"min": "1.27", "max": ""}, - "EKS": {"min": "1.27", "max": ""}, - "GKE": {"min": "1.27", "max": ""}, - "OPENSHIFT": {"min": "4.12", "max": ""} + "KIND": {"min": "1.29", "max": ""}, + "AKS": {"min": "1.29", "max": ""}, + "EKS": {"min": "1.29", "max": ""}, + "GKE": {"min": "1.29", "max": ""}, + "OPENSHIFT": {"min": "4.16", "max": ""} }, - "unit_test": {"min": "1.27", "max": "1.31"} + "unit_test": {"min": "1.29", "max": "1.34"} } diff --git a/.github/kind_versions.json b/.github/kind_versions.json index e70b300c06..9c0a6319a3 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,7 +1,10 @@ [ - "v1.31.0", - "v1.30.4", - "v1.29.8", - "v1.28.13", + "v1.34.0", + "v1.33.4", + "v1.32.8", + "v1.31.12", + "v1.30.13", + "v1.29.14", + "v1.28.15", "v1.27.16" ] diff --git a/.github/openshift_versions.json b/.github/openshift_versions.json index 08587af16a..1421d20ce0 100644 --- a/.github/openshift_versions.json +++ b/.github/openshift_versions.json @@ -1,8 +1,9 @@ [ + "4.19", + "4.18", "4.17", "4.16", "4.15", "4.14", - "4.13", "4.12" ] diff --git a/.github/pg_versions.json b/.github/pg_versions.json index d2df502134..7c8fd728b0 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,26 +1,26 @@ { + "18": [ + "18beta2", + "18beta2-8" + ], "17": [ - "17rc1", - "17rc1-5" + "17.5", + "17.4" ], "16": [ - "16.4", - "16.3" + "16.9", + "16.8" ], "15": [ - "15.8", - "15.7" + "15.13", + "15.12" ], "14": [ - "14.13", - "14.12" + "14.18", + "14.17" ], "13": [ - "13.16", - "13.15" - ], - "12": [ - "12.20", - "12.19" + "13.21", + "13.20" ] } \ No newline at end of file diff --git a/.github/postgres-versions-update.py b/.github/postgres-versions-update.py index 25ce0402d1..7a5f6055d5 100644 --- a/.github/postgres-versions-update.py +++ b/.github/postgres-versions-update.py @@ -1,5 +1,6 @@ # -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +# SPDX-License-Identifier: Apache-2.0 +# import re import pprint @@ -21,7 +24,7 @@ from packaging import version from subprocess import check_output -min_supported_major = 12 +min_supported_major = 13 pg_repo_name = "cloudnative-pg/postgresql" pg_version_re = re.compile(r"^(\d+)(?:\.\d+|beta\d+|rc\d+|alpha\d+)(-\d+)?$") diff --git a/.github/renovate.json5 b/.github/renovate.json5 index d03e3a76ae..d15e933194 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -1,429 +1,312 @@ { - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "extends": [ - "config:base" + $schema: 'https://docs.renovatebot.com/renovate-schema.json', + extends: [ + 'config:recommended', + 'customManagers:dockerfileVersions', + 'docker:pinDigests', + 'helpers:pinGitHubActionDigests', ], - "rebaseWhen": "never", -// The maximum number of PRs to be created in parallel - "prConcurrentLimit": 5, -// The branches renovate should target -// PLEASE UPDATE THIS WHEN RELEASING. - "baseBranches": ["main","release-1.22", "release-1.23", "release-1.24"], - "ignorePaths": ["docs/**", "releases/**", "contribute/**", "licenses/**", "pkg/versions/**"], - "postUpdateOptions": ["gomodTidy"], - "semanticCommits": "enabled", -// All PRs should have a label - "labels": ["automated", "do not backport", "no-issue"], - "regexManagers": [ + rebaseWhen: 'never', + prConcurrentLimit: 5, + baseBranches: [ + 'main', + 'release-1.25', + 'release-1.26', + 'release-1.27', + ], + ignorePaths: [ + 'docs/**', + 'releases/**', + 'contribute/**', + 'licenses/**', + 'pkg/versions/**', + 'pkg/specs/pgbouncer/', + ], + postUpdateOptions: [ + 'gomodTidy', + ], + semanticCommits: 'enabled', + labels: [ + 'automated', + 'do not backport', + 'no-issue', + ], + customManagers: [ { - // We want a PR to bump Kustomize version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "KUSTOMIZE_VERSION \\?= (?<currentValue>.*?)\\n" - ], - "datasourceTemplate": "go", - "depNameTemplate": "sigs.k8s.io/kustomize/kustomize/v5", - }, { - // We want a PR to bump controller-gen version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "CONTROLLER_TOOLS_VERSION \\?= (?<currentValue>.*?)\\n" - ], - "datasourceTemplate": "go", - "depNameTemplate": "sigs.k8s.io/controller-tools", - }, { - // We want a PR to bump goreleaser version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "GORELEASER_VERSION \\?= (?<currentValue>.*?)\\n" - ], - "datasourceTemplate": "go", - "versioningTemplate": "loose", - "depNameTemplate": "github.com/goreleaser/goreleaser", - }, { - // We want a PR to bump the external-snapshotter version - "fileMatch": [ - "^.github/workflows/continuous-delivery.yml", - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "EXTERNAL_SNAPSHOTTER_VERSION: \"(?<currentValue>.*?)\"", - "EXTERNAL_SNAPSHOTTER_VERSION=(?<currentValue>.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/external-snapshotter", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the external-provisioner version - "fileMatch": [ - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "EXTERNAL_PROVISIONER_VERSION=(?<currentValue>.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/external-provisioner", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the external-resizer version - "fileMatch": [ - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [
- "EXTERNAL_RESIZER_VERSION=(?<currentValue>.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/external-resizer", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the external-attacher version - "fileMatch": [ - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "EXTERNAL_ATTACHER_VERSION=(?<currentValue>.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/external-attacher", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the csi-driver-host-path version - "fileMatch": [ - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?<currentValue>.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/csi-driver-host-path", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the rook version - "fileMatch": [ - "^.github/workflows/continuous-delivery.yml", - ], - "matchStrings": [ - "ROOK_VERSION: \"(?<currentValue>.*?)\"", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "rook/rook", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump kind version - "fileMatch": [ - "^.github/workflows/continuous-delivery.yml", - "^.github/workflows/continuous-integration.yml", - ], - "matchStrings": [ - "KIND_VERSION: \"(?<currentValue>.*?)\"", - ], - "datasourceTemplate": "github-tags", - "depNameTemplate": "kubernetes-sigs/kind", - }, { - // We want a PR to bump kind node version - "fileMatch": [ - "^hack/setup-cluster.sh$", - "^hack/e2e/run-e2e-kind.sh$", - ], - "matchStrings": [ - "KIND_NODE_DEFAULT_VERSION=(?<currentValue>.*?)\\n", - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - "depNameTemplate": "kindest/node", - }, { - // We want a PR to bump k3d node version - "fileMatch": [ - "^hack/setup-cluster.sh$", - "^hack/e2e/run-e2e-k3d.sh$", - ], - "matchStrings": [ - "K3D_NODE_DEFAULT_VERSION=(?<currentValue>.*?)\\n", - ], - "versioningTemplate": "regex:^v(?<major>\\d+)(\\.(?<minor>\\d+))?(\\.(?<patch>\\d+))(\\+k3s?(?<build>\\d+))?$", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)", - "datasourceTemplate": "github-releases", - "depNameTemplate": "k3s-io/k3s", - }, { - // We want a PR to bump spellcheck version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "SPELLCHECK_VERSION \\?= (?<currentValue>.*?)\\n" - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - "depNameTemplate": "jonasbn/github-action-spellcheck", - }, { - // We want a PR to bump woke version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "WOKE_VERSION \\?= (?<currentValue>.*?)\\n" - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - "depNameTemplate": "getwoke/woke", - }, { - // We want a PR to bump operator-sdk in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "OPERATOR_SDK_VERSION \\?= (?<currentValue>.*?)\\n" - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "operator-framework/operator-sdk", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump operator package manager (opm) in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "OPM_VERSION \\?= (?<currentValue>.*?)\\n" - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "operator-framework/operator-registry", - "versioningTemplate":
"loose", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump redhat-preflight in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "PREFLIGHT_VERSION \\?= (?<currentValue>.*?)\\n" - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "redhat-openshift-ecosystem/openshift-preflight", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?<version>\\d+\\.\\d+\\.\\d+)" - }, { - "fileMatch": [ - "^config\\/olm-scorecard\\/patches\\/basic\\.config\\.yaml$", - "^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$", - ], - "matchStrings": [ - "image: quay.io/operator-framework/scorecard-test:(?<currentValue>.*?)\\n", - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - "depNameTemplate": "quay.io/operator-framework/scorecard-test", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - },{ - // We want a PR to bump Default Container Images versions. - "fileMatch": [ - "^pkg\\/versions\\/versions\\.go$", - "^pkg\\/specs\\/pgbouncer\\/deployments\\.go$" - ], - "matchStrings": [ - "DefaultImageName = \"(?<depName>.+?):(?<currentValue>.*?)\"\\n", - "DefaultPgbouncerImage = \"(?<depName>.+?):(?<currentValue>.*?)\"\\n", - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - }, { -// We want a PR to bump Go versions used through env variables in any GitHub -// Actions, taking it from the official GitHub repository. - "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], - "matchStrings": [ - "GOLANG_VERSION: \"(?<currentValue>.*?)\\.x\"", - ], - "datasourceTemplate": "golang-version", - "depNameTemplate": "golang", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?<version>\\d+\\.\\d+)" - }, { -// We want a PR to bump golangci-lint versions used through env variables in -// any GitHub Actions or Makefile, taking it from the official GitHub -// repository tags. - "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], - "matchStrings": [ - "GOLANGCI_LINT_VERSION: \"v(?<currentValue>.*?)\"", - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "golangci/golangci-lint", - "versioningTemplate": "loose", - "extractVersionTemplate": "^v(?<version>\\d+\\.\\d+\\.\\d+)" - }, { - "fileMatch": ["^.github/workflows/continuous-delivery.yml",], - "matchStrings": [ - "VELERO_VERSION: \"v(?<currentValue>.*?)\"", - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "vmware-tanzu/velero", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - }, { - "fileMatch": ["^.github/workflows/continuous-delivery.yml",], - "matchStrings": [ - "VELERO_AWS_PLUGIN_VERSION: \"v(?<currentValue>.*?)\"", - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "vmware-tanzu/velero-plugin-for-aws", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?<version>v\\d+\\.\\d+\\.\\d+)" - } + customType: 'regex', + managerFilePatterns: [ + '/^Makefile$/', + ], + matchStrings: [ + '# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*\\?=\\s*["\']?(?<currentValue>.+?)["\']?\\s', + ], + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^\\.github\\/workflows\\/[^/]+\\.ya?ml$/', + ], + matchStrings: [ + '# renovate: datasource=(?<datasource>[a-z-.]+?)
depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*: \\s*["\']?(?<currentValue>.+?)["\']?\\s', + ], + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^.github/workflows/continuous-delivery.yml/', + '/^hack/setup-cluster.sh$/', + ], + matchStrings: [ + 'EXTERNAL_SNAPSHOTTER_VERSION: "(?<currentValue>.*?)"', + 'EXTERNAL_SNAPSHOTTER_VERSION=(?<currentValue>.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/external-snapshotter', + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', + ], + matchStrings: [ + 'EXTERNAL_PROVISIONER_VERSION=(?<currentValue>.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/external-provisioner', + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', + ], + matchStrings: [ + 'EXTERNAL_RESIZER_VERSION=(?<currentValue>.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/external-resizer', + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', + ], + matchStrings: [ + 'EXTERNAL_ATTACHER_VERSION=(?<currentValue>.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/external-attacher', + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', + ], + matchStrings: [ + 'CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?<currentValue>.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/csi-driver-host-path', + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', + '/^hack/e2e/run-e2e-kind.sh$/', + ], + matchStrings: [ + 'KIND_NODE_DEFAULT_VERSION=(?<currentValue>.*?)\\n', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'loose', + depNameTemplate: 'kindest/node', + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^config\\/olm-scorecard\\/patches\\/basic\\.config\\.yaml$/', + '/^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$/', + ], + matchStrings: [ + 'image: quay.io/operator-framework/scorecard-test:(?<currentValue>.*?)\\n', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'loose', + depNameTemplate: 'quay.io/operator-framework/scorecard-test', + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^pkg\\/versions\\/versions\\.go$/', + '/^pkg\\/specs\\/pgbouncer\\/deployments\\.go$/', + ], + matchStrings: [ + 'DefaultImageName = "(?<depName>.+?):(?<currentValue>.*?)"\\n', + 'DefaultPgbouncerImage = "(?<depName>.+?):(?<currentValue>.*?)"\\n', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'loose', + }, + { + customType: 'regex', + managerFilePatterns: [ + '/^tests\\/utils\\/minio\\/minio\\.go$/', + ], + matchStrings: [ + 'minioImage = "(?<depName>.+?):(?<currentValue>.*?)"', + 'minioClientImage = "(?<depName>.+?):(?<currentValue>.*?)"', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'regex:^RELEASE\\.(?<major>\\d{4})-(?<minor>\\d{2})-(?<patch>\\d{2})T\\d{2}-\\d{2}-\\d{2}Z$', + }, ], - "packageRules": [ + packageRules: [ + { + matchDatasources: [ + 'docker', + ], + allowedVersions: '!/alpha/', + }, + { + matchDatasources: [
'go', + ], + matchDepNames: [ + 'k8s.io/client-go', + ], + allowedVersions: '<1.0', + }, + { + matchDatasources: [ + 'go', + ], + groupName: 'kubernetes patches', + matchUpdateTypes: [ + 'patch', + 'digest', + ], + matchPackageNames: [ + 'k8s.io{/,}**', + 'sigs.k8s.io{/,}**', + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + ], + matchDepNames: [ + '!sigs.k8s.io/kustomize/kustomize/v5', + '!sigs.k8s.io/controller-tools', + ], + }, + { + matchDatasources: [ + 'go', + ], + matchUpdateTypes: [ + 'major', + 'minor', + ], + matchPackageNames: [ + 'k8s.io{/,}**', + 'sigs.k8s.io{/,}**', + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + ], + }, { - "matchDatasources": [ - "docker" + matchDatasources: [ + 'go', + ], + matchUpdateTypes: [ + 'major', + ], + matchPackageNames: [ + '*', + '!k8s.io{/,}**', + '!sigs.k8s.io{/,}**', + '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + ], + }, + { + matchDatasources: [ + 'go', + ], + matchUpdateTypes: [ + 'minor', + 'patch', + 'digest', + ], + groupName: 'all non-major go dependencies', + matchPackageNames: [ + '*', + '!k8s.io{/,}**', + '!sigs.k8s.io{/,}**', + '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + '!github.com/cloudnative-pg/{/,}**', + ], + }, + { + matchDatasources: [ + 'github-tags', + ], + matchUpdateTypes: [ + 'digest', + 'pinDigest', + 'minor', + 'patch', + ], + groupName: 'all non-major github action', + pinDigests: true, + }, + { + groupName: 'kubernetes CSI', + separateMajorMinor: false, + pinDigests: false, + matchPackageNames: [ + 'kubernetes-csi{/,}**', + 'rook{/,}**', + ], + }, + { + groupName: 'backup test tools', + separateMajorMinor: false, + pinDigests: false, + matchPackageNames: [ + 'vmware-tanzu{/,}**', + 'minio{/,}**', ], - "allowedVersions": "!/alpha/", }, { -// We need to ignore k8s.io/client-go older versions as they switched to -// semantic version and old tags are still available in the repo. - "matchDatasources": [ - "go" - ], - "matchDepNames": [ - "k8s.io/client-go" - ], - "allowedVersions": "<1.0" - }, { -// We want a single PR for all the patches bumps of kubernetes related -// dependencies, as usually these are all strictly related. - "matchDatasources": [ - "go" - ], - "groupName": "kubernetes patches", - "matchUpdateTypes": [ - "patch", - "digest" - ], - "matchPackagePrefixes": [ - "k8s.io", - "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" - ], - "excludeDepNames": [ - "sigs.k8s.io/kustomize/kustomize/v5", - "sigs.k8s.io/controller-tools" - ] - }, { -// We want dedicated PRs for each minor and major bumps to kubernetes related -// dependencies. - "matchDatasources": [ - "go" - ], - "matchUpdateTypes": [ - "major", - "minor" - ], - "matchPackagePrefixes": [ - "k8s.io", - "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" - ] - }, { -// We want dedicated PRs for each bump to non-kubernetes Go dependencies. - "matchDatasources": [ - "go" - ], - "matchPackagePatterns": [ - "*" - ], - "excludePackagePrefixes": [ - "k8s.io", - "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" - ], - "matchUpdateTypes": [ - "major", - ], - }, { -// We want a single PR for all minor and patch bumps to non-kubernetes Go -// dependencies. 
- "matchDatasources": [ - "go" - ], - "matchPackagePatterns": [ - "*" - ], - "excludePackagePrefixes": [ - "k8s.io", - "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" - ], - "matchUpdateTypes": [ - "minor", - "patch", - "digest" - ], - "groupName": "all non-major go dependencies" - }, { -// We want a single PR for all minor and patch bumps of GitHub Actions - "matchDepTypes": [ - "action" - ], - "matchUpdateTypes": [ - "minor", - "patch" - ], - "groupName": "all non-major github action", - "pinDigests": false - },{ -// We want dedicated PRs for each major bump to GitHub Actions - "matchDepTypes": [ - "action" - ], - "pinDigests": false - },{ -// PR group for Kubernetes CSI - "groupName": "kubernetes CSI", - "matchPackagePrefixes": [ - "kubernetes-csi", - "rook", - ], - "separateMajorMinor": "false", - "pinDigests": false - }, { -// PR group for backup test tools - "groupName": "backup test tools", - "matchPackagePrefixes": [ - "vmware-tanzu", - ], - "separateMajorMinor": "false", - "pinDigests": false + groupName: 'operator framework', + separateMajorMinor: false, + pinDigests: false, + matchPackageNames: [ + 'operator-framework{/,}**', + 'redhat-openshift-ecosystem{/,}**', + 'quay.io/operator-framework{/,}**', + ], }, { -// PR group for all the operator framework related things - "groupName": "operator framework", - "matchPackagePrefixes": [ - "operator-framework", - "redhat-openshift-ecosystem", - "quay.io/operator-framework", - ], - "separateMajorMinor": "false", - "pinDigests": false + groupName: 'cnpg', + matchPackageNames: [ + 'github.com/cloudnative-pg{/,}**', + ], + separateMajorMinor: false, + pinDigests: false, }, { -// PR group for spellcheck - "groupName": "spellcheck", - "matchPackagePrefixes": [ - "jonasbn/github-action-spellcheck", - "rojopolis/spellcheck-github-actions", - ], - "separateMajorMinor": "false", - "pinDigests": false, - } - ] + groupName: 'container distroless digests', + matchPackageNames: [ + 'gcr.io/distroless{/,}**', + ], + pinDigests: true, + separateMajorMinor: false, + }, + ], } diff --git a/.github/report-failed-test.sh b/.github/report-failed-test.sh index af88177a79..791d6060cf 100644 --- a/.github/report-failed-test.sh +++ b/.github/report-failed-test.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash ## -## Copyright The CloudNativePG Contributors +## Copyright © contributors to CloudNativePG, established as +## CloudNativePG a Series of LF Projects, LLC. ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. @@ -14,6 +15,8 @@ ## See the License for the specific language governing permissions and ## limitations under the License. ## +## SPDX-License-Identifier: Apache-2.0 +## echo '::echo::off' diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 3cb706ce64..60679b5462 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -8,35 +8,40 @@ on: branches: - main +permissions: read-all + env: - GOLANG_VERSION: "1.23.x" + # renovate: datasource=golang-version depName=golang versioning=loose + GOLANG_VERSION: "1.25.0" jobs: # Label the source pull request with 'backport-requested' and all supported releases label, the goal is, by default # we backport everything, except those PR that are created or contain `do not backport` explicitly. 
label-source-pr: name: Add labels to PR - if: | + if: | github.event.pull_request.merged == false && !contains(github.event.pull_request.labels.*.name, 'backport-requested') && !contains(github.event.pull_request.labels.*.name, 'do not backport') - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + pull-requests: write steps: - name: Label the pull request - uses: actions-ecosystem/action-add-labels@v1 + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1 if: ${{ !contains(github.event.pull_request.labels.*.name, 'do not backport') }} with: github_token: ${{ secrets.REPO_GHA_PAT }} number: ${{ github.event.pull_request.number }} labels: | backport-requested :arrow_backward: - release-1.22 - release-1.23 - release-1.24 + release-1.25 + release-1.26 + release-1.27 - name: Create comment - uses: peter-evans/create-or-update-comment@v4 + uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ github.event.pull_request.number }} @@ -49,15 +54,15 @@ jobs: reactions: heart - name: Remove redundant labels - uses: actions-ecosystem/action-remove-labels@v1 + uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1 if: ${{ contains(github.event.pull_request.labels.*.name, 'do not backport') }} with: github_token: ${{ secrets.REPO_GHA_PAT }} labels: | backport-requested :arrow_backward: - release-1.22 - release-1.23 - release-1.24 + release-1.25 + release-1.26 + release-1.27 ## backport pull request in condition when pr contains 'backport-requested' label and contains target branches labels back-porting-pr: @@ -69,11 +74,11 @@ jobs: contains(github.event.pull_request.labels.*.name, 'backport-requested :arrow_backward:') ) && !contains(github.event.pull_request.labels.*.name, 'do not backport') - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.23, release-1.24] + branch: [release-1.25, release-1.26, release-1.27] env: PR: ${{ github.event.pull_request.number }} outputs: @@ -82,14 +87,14 @@ jobs: - name: Checkout code if: contains( github.event.pull_request.labels.*.name, matrix.branch ) - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: fetch-depth: 0 ref: ${{ matrix.branch }} token: ${{ secrets.REPO_GHA_PAT }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -141,10 +146,12 @@ jobs: env: PR: ${{ github.event.pull_request.number }} COMMIT: ${{ needs.back-porting-pr.outputs.commit }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + issues: write steps: - name: create ticket - uses: dacbd/create-issue-action@v2 + uses: dacbd/create-issue-action@cdb57ab6ff8862aa09fee2be6ba77a59581921c2 # v2 with: token: ${{ secrets.GITHUB_TOKEN }} title: Backport failure for pull request ${{ env.PR }} diff --git a/.github/workflows/chatops.yml b/.github/workflows/chatops.yml index b2058ca48e..0da6dd096e 100644 --- a/.github/workflows/chatops.yml +++ b/.github/workflows/chatops.yml @@ -9,20 +9,22 @@ on: issue_comment: types: [created] +permissions: read-all + jobs: ok-to-merge: if: | github.event.issue.pull_request && startsWith(github.event.comment.body, '/ok-to-merge') - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check User Permission id: checkUser - uses: 
actions-cool/check-user-permission@v2 + uses: actions-cool/check-user-permission@7b90a27f92f3961b368376107661682c441f6103 # v2 with: require: 'write' - name: Add "ok to merge :ok_hand:" label to PR - uses: actions-ecosystem/action-add-labels@v1.1.3 + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1.1.3 if: steps.checkUser.outputs.require-result == 'true' with: github_token: ${{ secrets.REPO_GHA_PAT }} diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml new file mode 100644 index 0000000000..2e9057f3f9 --- /dev/null +++ b/.github/workflows/close-inactive-issues.yml @@ -0,0 +1,25 @@ +# See https://github.com/marketplace/actions/close-stale-issues +name: Close inactive issues +on: + workflow_dispatch: + schedule: + - cron: "30 1 * * *" + +permissions: read-all + +jobs: + close-issues: + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9 + with: + days-before-issue-stale: 60 + days-before-issue-close: 14 + stale-issue-message: "This issue is stale because it has been open for 60 days with no activity." + close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." + days-before-pr-stale: -1 + days-before-pr-close: -1 + ascending: true + exempt-issue-labels: "no-stale" diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d2e84c807b..0d93c0ff29 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -30,20 +30,23 @@ on: schedule: - cron: '24 0 * * 5' +permissions: read-all + # set up environment variables to be used across all the jobs env: - GOLANG_VERSION: "1.23.x" + # renovate: datasource=golang-version depName=golang versioning=loose + GOLANG_VERSION: "1.25.0" jobs: duplicate_runs: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 name: Skip duplicate runs continue-on-error: true outputs: should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }} steps: - id: skip_check - uses: fkirc/skip-duplicate-actions@v5.3.1 + uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1 with: concurrent_skipping: 'same_content' skip_after_successful_duplicate: 'true' @@ -58,23 +61,22 @@ jobs: if: | needs.duplicate_runs.outputs.should_skip != 'true' permissions: - actions: read contents: read security-events: write steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 with: languages: "go" build-mode: manual @@ -91,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index a2a9e8f77f..6cb8f9aa6b 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -20,10 +20,10 @@ on: default: '4' feature_type: description: > - Feature Type (disruptive, performance, upgrade, smoke, basic, service-connectivity, self-healing, - backup-restore, snapshot, operator, observability, replication, plugin, postgres-configuration, - pod-scheduling, cluster-metadata, recovery, importing-databases, storage, security, maintenance, - tablespaces) + Feature Type (backup-restore, basic, cluster-metadata, declarative-databases, disruptive, + importing-databases, maintenance, no-openshift, observability, operator, performance, plugin, + pod-scheduling, postgres-configuration, postgres-major-upgrade, publication-subscription, recovery, + replication, security, self-healing, service-connectivity, smoke, snapshot, storage, tablespaces, upgrade) required: false log_level: description: 'Log level for operator (error, warning, info, debug(default), trace)' @@ -32,13 +32,18 @@ on: schedule: - cron: '0 1 * * *' +permissions: read-all + # set up environment variables to be used across all the jobs env: - GOLANG_VERSION: "1.23.x" + # renovate: datasource=golang-version depName=golang versioning=loose + GOLANG_VERSION: "1.25.0" KUBEBUILDER_VERSION: "2.3.1" - KIND_VERSION: "v0.24.0" - ROOK_VERSION: "v1.15.2" - EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0" + # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver + KIND_VERSION: "v0.30.0" + # renovate: datasource=github-releases depName=rook/rook versioning=loose + ROOK_VERSION: "v1.17.7" + EXTERNAL_SNAPSHOTTER_VERSION: "v8.3.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" BUILD_PUSH_CACHE_FROM: "" @@ -47,6 +52,7 @@ env: REGISTRY_USER: ${{ github.actor }} REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }} REPOSITORY_OWNER: "cloudnative-pg" + SIGN_IMAGES: "true" SLACK_USERNAME: "cnpg-bot" BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager" # Keep in mind that adding more platforms (architectures) will increase the building @@ -63,16 +69,18 @@ jobs: # Trigger the workflow on release-* branches for smoke testing whenever it's a scheduled run. 
# Note: this is a workaround since we can't directly schedule-run a workflow from a non default branch smoke_test_release_branches: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + actions: write name: smoke test release-* branches when it's a scheduled run if: github.event_name == 'schedule' strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.23, release-1.24] + branch: [release-1.25, release-1.26, release-1.27] steps: - name: Invoke workflow with inputs - uses: benc-uk/workflow-dispatch@v1 + uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 with: workflow: continuous-delivery ref: ${{ matrix.branch }} @@ -81,10 +89,15 @@ jobs: check_commenter: if: | github.event_name == 'issue_comment' && + github.event.action == 'created' && github.event.issue.pull_request && startsWith(github.event.comment.body, '/test') name: Retrieve command - runs-on: ubuntu-22.04 + permissions: + pull-requests: write + contents: read + issues: read + runs-on: ubuntu-24.04 outputs: github_ref: ${{ steps.refs.outputs.head_sha }} depth: ${{ env.DEPTH }} @@ -95,7 +108,7 @@ jobs: steps: - name: Check for Command id: command - uses: xt0rted/slash-command-action@v2 + uses: xt0rted/slash-command-action@bf51f8f5f4ea3d58abc7eca58f77104182b23e88 # v2 continue-on-error: false with: command: test @@ -145,11 +158,11 @@ jobs: echo "LOG_LEVEL=${LOG_LEVEL}" >> $GITHUB_ENV - name: Resolve Git reference - uses: xt0rted/pull-request-comment-branch@v2 + uses: xt0rted/pull-request-comment-branch@e8b8daa837e8ea7331c0003c9c316a64c6d8b0b1 # v3 id: refs - name: Create comment - uses: peter-evans/create-or-update-comment@v4 + uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 with: token: ${{ secrets.GITHUB_TOKEN }} repository: ${{ github.repository }} @@ -161,7 +174,7 @@ jobs: name: Parse arguments if: | github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: github_ref: ${{ github.ref }} depth: ${{ env.DEPTH }} @@ -199,7 +212,7 @@ jobs: needs: - check_commenter - test_arguments - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: | ( needs.check_commenter.result == 'success' || @@ -242,13 +255,13 @@ jobs: if: | always() && !cancelled() && needs.evaluate_options.result == 'success' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 permissions: contents: read packages: write pull-requests: read + id-token: write outputs: - image: ${{ steps.image-meta.outputs.image }} # 'branch_name' is used in 'GetMostRecentReleaseTag' in the Go code branch_name: ${{ steps.build-meta.outputs.branch_name }} upload_artifacts: ${{ steps.build-meta.outputs.upload_artifacts }} @@ -257,20 +270,23 @@ jobs: author_name: ${{ steps.build-meta.outputs.author_name }} author_email: ${{ steps.build-meta.outputs.author_email }} controller_img: ${{ env.CONTROLLER_IMG }} - controller_img_ubi8: ${{ env.CONTROLLER_IMG_UBI8 }} + controller_img_digest: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['containerimage.digest'] }} + controller_img_prime_digest: ${{ steps.build-prime.outputs.digest }} + controller_img_ubi: ${{ env.CONTROLLER_IMG_UBI }} + index_img: ${{ env.INDEX_IMG }} bundle_img: ${{ env.BUNDLE_IMG }} catalog_img: ${{ env.CATALOG_IMG }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} # To identify the commit we need the history and all the tags. 
fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -280,9 +296,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - images='${{ env.OPERATOR_IMAGE_NAME }}' - tags='' - labels='' commit_sha=${{ needs.evaluate_options.outputs.git_ref }} commit_date=$(git log -1 --pretty=format:'%ad' --date short "${commit_sha}" || : ) # use git describe to get the nearest tag and use that to build the version (e.g. 1.4.0-dev24 or 1.4.0) @@ -312,25 +325,23 @@ jobs: fi # extract tag from branch name - tag_name=$(echo "$branch_name" | sed 's/[^a-zA-Z0-9]/-/g') + tag_name=$(echo "$branch_name" | tr / -) upload_artifacts=false if [[ ${branch_name} == main || ${branch_name} =~ ^release- ]]; then upload_artifacts=true fi - echo "IMAGES=${images}" >> $GITHUB_ENV - echo "TAGS=${tags}" >> $GITHUB_ENV - echo "LABELS=${labels}" >> $GITHUB_ENV echo "DATE=${commit_date}" >> $GITHUB_ENV echo "VERSION=${commit_version}" >> $GITHUB_ENV echo "COMMIT=${commit_short}" >> $GITHUB_ENV + echo "IMAGE_TAG=${tag_name,,}" >> $GITHUB_ENV + echo "REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV echo "commit_sha=${commit_sha}" >> $GITHUB_OUTPUT echo "commit_msg=${commit_message}" >> $GITHUB_OUTPUT echo "author_name=${author_name}" >> $GITHUB_OUTPUT echo "author_email=${author_email}" >> $GITHUB_OUTPUT echo "branch_name=${branch_name}" >> $GITHUB_OUTPUT - echo "tag_name=${tag_name,,}" >> $GITHUB_OUTPUT echo "upload_artifacts=${upload_artifacts}" >> $GITHUB_OUTPUT - name: Set GoReleaser environment @@ -339,7 +350,7 @@ jobs: echo PWD=$(pwd) >> $GITHUB_ENV - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 with: distribution: goreleaser version: v2 @@ -348,102 +359,73 @@ jobs: DATE: ${{ env.DATE }} COMMIT: ${{ env.COMMIT }} VERSION: ${{ env.VERSION }} - - - name: Docker meta - id: docker-meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.IMAGES }} - tags: | - type=raw,value=${{ steps.build-meta.outputs.tag_name }} - - - name: Docker meta UBI8 - id: docker-meta-ubi8 - uses: docker/metadata-action@v5 - with: - images: ${{ env.IMAGES }} - flavor: | - suffix=-ubi8 - tags: | - type=raw,value=${{ steps.build-meta.outputs.tag_name }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . 
- file: Dockerfile - push: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta.outputs.tags }} - labels: ${{ env.LABELS }} - provenance: ${{ env.BUILD_PUSH_PROVENANCE }} - cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} - cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} - - - name: Build and push UBI8 - uses: docker/build-push-action@v6 + uses: docker/bake-action@3acf805d94d93a86cce4ca44798a76464a75b88c # v6 + id: bake-push + env: + environment: "testing" + buildVersion: ${{ env.VERSION }} + tag: ${{ env.IMAGE_TAG }} + registry: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }} + revision: ${{ env.COMMIT }} with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile-ubi8 + source: . push: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi8.outputs.tags }} - labels: ${{ env.LABELS }} - provenance: ${{ env.BUILD_PUSH_PROVENANCE }} - cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} - cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} - - - name: Image Meta - id: image-meta - env: - TAGS: ${{ steps.docker-meta.outputs.tags }} + no-cache: true + targets: "default" + - + name: Install cosign + if: env.SIGN_IMAGES == 'true' + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3 + # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ + # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on + # how to use cosign. + - + name: Sign images + if: env.SIGN_IMAGES == 'true' run: | - # If there is more than one tag, take the first one - # TAGS could be separated by newlines or commas - image=$(sed -n '1{s/,.*//; p}' <<< "$TAGS") - echo "image=${image}" >> $GITHUB_OUTPUT + images=$(echo '${{ steps.bake-push.outputs.metadata }}' | + jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' + ) + cosign sign --yes ${images} - name: Output images env: - TAGS: ${{ steps.docker-meta.outputs.tags }} - TAGS_UBI8: ${{ steps.docker-meta-ubi8.outputs.tags }} + DISTROLESS: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['image.name'] }} + UBI: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }} run: | - LOWERCASE_OPERATOR_IMAGE_NAME=${OPERATOR_IMAGE_NAME,,} - TAG=${TAGS#*:} - TAG_UBI=${TAGS_UBI8#*:} - echo "CONTROLLER_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG}" >> $GITHUB_ENV - echo "CONTROLLER_IMG_UBI8=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV - echo "BUNDLE_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:bundle-${TAG}" >> $GITHUB_ENV - echo "CATALOG_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:catalog-${TAG}" >> $GITHUB_ENV + echo "CONTROLLER_IMG=${DISTROLESS}" >> $GITHUB_ENV + echo "CONTROLLER_IMG_UBI=${UBI}" >> $GITHUB_ENV + echo "BUNDLE_IMG=${UBI}-bundle" >> $GITHUB_ENV + echo "INDEX_IMG=${UBI}-index" >> $GITHUB_ENV + echo "CATALOG_IMG=${UBI}-catalog" >> $GITHUB_ENV - name: Generate manifest for operator deployment id: generate-manifest env: - CONTROLLER_IMG: ${{ steps.image-meta.outputs.image }} + CONTROLLER_IMG: ${{ env.CONTROLLER_IMG }} + CONTROLLER_IMG_DIGEST: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['containerimage.digest'] }} run: | make generate-manifest - name: Upload the operator manifest as artifact in workflow - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: operator-manifest.yaml path: dist/operator-manifest.yaml @@ -457,7 +439,7 @@ jobs: # NOTE: we only fire this in TEST DEPTH = 4, as that 
is the level of the # upgrade test name: Build binary for upgrade test - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 if: | always() && !cancelled() && needs.evaluate_options.outputs.test_level == '4' @@ -479,7 +461,8 @@ jobs: # NOTE: we only fire this in TEST DEPTH = 4, as that is the level of the # upgrade test name: Build and push image for upgrade test - uses: docker/build-push-action@v6 + uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6 + id: build-prime if: | always() && !cancelled() && needs.evaluate_options.outputs.test_level == '4' @@ -490,8 +473,7 @@ jobs: push: true build-args: | VERSION=${{ env.VERSION }}-prime - tags: ${{ steps.docker-meta.outputs.tags }}-prime - labels: ${{ env.LABELS }} + tags: ${{ env.CONTROLLER_IMG }}-prime provenance: ${{ env.BUILD_PUSH_PROVENANCE }} cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} @@ -506,11 +488,11 @@ jobs: needs.buildx.result == 'success' && needs.buildx.outputs.upload_artifacts == 'true' && github.repository_owner == 'cloudnative-pg' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout artifact - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: repository: cloudnative-pg/artifacts token: ${{ secrets.REPO_GHA_PAT }} @@ -533,7 +515,7 @@ jobs: rm -fr manifests/operator-manifest.yaml - name: Prepare the operator manifest - uses: actions/download-artifact@v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: name: operator-manifest.yaml path: manifests @@ -552,7 +534,7 @@ jobs: git commit -m "${COMMIT_MESSAGE}" - name: Push changes - uses: ad-m/github-push-action@v0.8.0 + uses: ad-m/github-push-action@d91a481090679876dfc4178fef17f286781251df # v0.8.0 with: github_token: ${{ secrets.REPO_GHA_PAT }} repository: cloudnative-pg/artifacts @@ -572,9 +554,9 @@ jobs: if: | (always() && !cancelled()) && needs.buildx.result == 'success' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: - image: ${{ needs.buildx.outputs.image }} + controller_img: ${{ needs.buildx.outputs.controller_img }} localMatrix: ${{ steps.generate-jobs.outputs.localMatrix }} localEnabled: ${{ steps.generate-jobs.outputs.localEnabled }} localTimeout: ${{ steps.generate-jobs.outputs.localE2ETimeout }} @@ -593,7 +575,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -620,7 +602,7 @@ jobs: strategy: fail-fast: false matrix: ${{ fromJSON(needs.generate-jobs.outputs.localMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -639,7 +621,9 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} + CONTROLLER_IMG_DIGEST: ${{ needs.buildx.outputs.controller_img_digest }} + CONTROLLER_IMG_PRIME_DIGEST: ${{ needs.buildx.outputs.controller_img_prime_digest }} E2E_DEFAULT_STORAGE_CLASS: standard E2E_CSI_STORAGE_CLASS: csi-hostpath-sc E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-hostpath-snapclass @@ -649,7 +633,7 @@ jobs: steps: - name: Cleanup Disk - uses: jlumbroso/free-disk-space@main + uses: 
jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 with: android: true dotnet: true @@ -669,37 +653,30 @@ jobs: echo "-----------------------------------------------------" - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - - # 'Retry' preparing the E2E test ENV - name: Prepare the environment - uses: nick-fields/retry@v3 + name: Install Kind + uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 with: - timeout_seconds: 300 - max_attempts: 3 - on_retry_command: | - # Clear-ups before retries - sudo rm -rf /usr/local/bin/kind /usr/local/bin/kubectl - command: | - sudo apt-get update - sudo apt-get install -y gettext-base - sudo hack/setup-cluster.sh prepare /usr/local/bin + install_only: true + version: ${{ env.KIND_VERSION }} + kubectl_version: ${{ env.K8S_VERSION }} - name: Prepare patch for customization env: @@ -716,6 +693,8 @@ jobs: cat config/manager/env_override.yaml - name: Run Kind End-to-End tests + env: + ENABLE_APISERVER_AUDIT: true run: make e2e-test-kind - @@ -751,7 +730,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -777,7 +756,7 @@ jobs: # Archive logs for failed test cases if there are any name: Archive Kind logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: kind-logs-${{ matrix.id }} path: kind-logs/ @@ -785,7 +764,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -796,7 +775,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -822,18 +801,18 @@ jobs: - buildx - generate-jobs - evaluate_options - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: azure_storage_account: ${{ steps.setup.outputs.azure_storage_account }} steps: - name: Azure Login - uses: azure/login@v2.2.0 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: Create AKS shared resources - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 id: setup with: timeout_minutes: 10 @@ -868,7 +847,7 @@ jobs: fail-fast: false max-parallel: 8 matrix: ${{ fromJSON(needs.generate-jobs.outputs.aksMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ 
needs.evaluate_options.outputs.test_level }} @@ -890,7 +869,9 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} + CONTROLLER_IMG_DIGEST: ${{ needs.buildx.outputs.controller_img_digest }} + CONTROLLER_IMG_PRIME_DIGEST: ${{ needs.buildx.outputs.controller_img_prime_digest }} E2E_DEFAULT_STORAGE_CLASS: rook-ceph-block E2E_CSI_STORAGE_CLASS: rook-ceph-block E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-rbdplugin-snapclass @@ -899,18 +880,18 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Prepare the environment - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_seconds: 300 max_attempts: 3 @@ -919,7 +900,7 @@ jobs: sudo apt-get install -y gettext-base - name: Install ginkgo - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 1 max_attempts: 3 @@ -928,24 +909,24 @@ jobs: - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Azure Login - uses: azure/login@v2.2.0 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: Install kubectl - uses: azure/setup-kubectl@v4 + uses: azure/setup-kubectl@776406bce94f63e41d621b960d78ee25c8b76ede # v4 with: version: v${{ env.K8S_VERSION }} - name: Create AKS cluster - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 10 max_attempts: 3 @@ -991,7 +972,7 @@ jobs: # use rook to get the small PV we use in the tests. # It can still take a while to deploy rook. 
name: Set up Rook - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 27 max_attempts: 1 @@ -1069,7 +1050,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -1094,7 +1075,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -1104,7 +1085,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -1168,20 +1149,20 @@ jobs: - generate-jobs - e2e-aks-setup - e2e-aks - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: AZURE_STORAGE_ACCOUNT: ${{ needs.e2e-aks-setup.outputs.azure_storage_account }} steps: - name: Azure Login if: always() - uses: azure/login@v2.2.0 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: Teardown AKS shared resources if: always() - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 5 max_attempts: 3 @@ -1209,7 +1190,7 @@ jobs: fail-fast: false max-parallel: 6 matrix: ${{ fromJSON(needs.generate-jobs.outputs.eksMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -1228,7 +1209,9 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} + CONTROLLER_IMG_DIGEST: ${{ needs.buildx.outputs.controller_img_digest }} + CONTROLLER_IMG_PRIME_DIGEST: ${{ needs.buildx.outputs.controller_img_prime_digest }} E2E_DEFAULT_STORAGE_CLASS: gp3 E2E_CSI_STORAGE_CLASS: gp3 E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: ebs-csi-snapclass @@ -1244,26 +1227,26 @@ jobs: echo "CLUSTER_NAME=${{ env.E2E_SUFFIX }}-test-${{ github.run_number }}-$( echo ${{ matrix.id }} | tr -d '_.-' )" >> $GITHUB_ENV - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Prepare the environment - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_seconds: 300 max_attempts: 3 @@ -1272,7 +1255,7 @@ jobs: sudo apt-get install -y gettext-base - name: Install ginkgo - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 1 max_attempts: 3 @@ -1280,14 +1263,14 @@ 
jobs: go install github.com/onsi/ginkgo/v2/ginkgo - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v4 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-region: ${{ env.AWS_REGION }} - name: Install eksctl - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 1 max_attempts: 3 @@ -1339,10 +1322,12 @@ jobs: kubectl get storageclass - name: Setup Velero - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 env: - VELERO_VERSION: "v1.14.1" - VELERO_AWS_PLUGIN_VERSION: "v1.10.1" + # renovate: datasource=github-releases depName=vmware-tanzu/velero + VELERO_VERSION: "v1.16.2" + # renovate: datasource=docker depName=velero/velero-plugin-for-aws + VELERO_AWS_PLUGIN_VERSION: "v1.12.2" with: timeout_minutes: 10 max_attempts: 3 @@ -1438,7 +1423,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -1463,7 +1448,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -1473,7 +1458,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -1597,7 +1582,7 @@ jobs: fail-fast: false max-parallel: 6 matrix: ${{ fromJSON(needs.generate-jobs.outputs.gkeMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -1616,7 +1601,9 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} + CONTROLLER_IMG_DIGEST: ${{ needs.buildx.outputs.controller_img_digest }} + CONTROLLER_IMG_PRIME_DIGEST: ${{ needs.buildx.outputs.controller_img_prime_digest }} E2E_DEFAULT_STORAGE_CLASS: standard-rwo E2E_CSI_STORAGE_CLASS: standard-rwo E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: pd-csi-snapclass @@ -1627,26 +1614,26 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Prepare the environment - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_seconds: 300 max_attempts: 3 @@ -1655,7 +1642,7 @@ jobs: sudo apt-get install -y gettext-base 
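+          # gettext-base is installed for envsubst, which the setup scripts use to render
+          # templates from environment variables. A minimal sketch of the pattern (the
+          # exported variable is illustrative; the template path is the one used by the
+          # OpenShift job later in this workflow):
+          #   export BASE_DOMAIN=example.com
+          #   envsubst < hack/install-config.yaml.template > hack/install-config.yaml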
- name: Install ginkgo - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_seconds: 120 max_attempts: 3 @@ -1673,12 +1660,12 @@ jobs: - name: Authenticate to Google Cloud id: 'auth' - uses: google-github-actions/auth@v2 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3 with: credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}' - name: Set up Cloud SDK and kubectl - uses: google-github-actions/setup-gcloud@v2 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3 with: project_id: ${{ secrets.GCP_PROJECT_ID }} install_components: 'kubectl,gke-gcloud-auth-plugin' @@ -1770,7 +1757,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -1795,7 +1782,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -1805,7 +1792,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -1894,7 +1881,10 @@ jobs: fail-fast: false max-parallel: 6 matrix: ${{ fromJSON(needs.generate-jobs.outputs.openshiftMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + contents: read + packages: write env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -1913,7 +1903,7 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} E2E_DEFAULT_STORAGE_CLASS: gp3-csi E2E_CSI_STORAGE_CLASS: gp3-csi E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-aws-vsc @@ -1937,28 +1927,28 @@ jobs: echo "CLUSTER_NAME=${{ env.E2E_SUFFIX }}-ocp-${{ github.run_number}}-$( echo ${{ matrix.k8s_version }} | tr -d '.' 
)" >> $GITHUB_ENV - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -1966,14 +1956,15 @@ jobs: - name: Build and push the operator and catalog env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi8 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi }} BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} + INDEX_IMG: ${{ needs.buildx.outputs.index_img }} CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }} run: | make olm-catalog - name: Install OC Installer and client - uses: redhat-actions/openshift-tools-installer@v1 + uses: redhat-actions/openshift-tools-installer@144527c7d98999f2652264c048c7a9bd103f8a82 # v1 with: source: "mirror" openshift-install: ${{ matrix.k8s_version }} @@ -1983,6 +1974,42 @@ jobs: run: | envsubst < hack/install-config.yaml.template > hack/install-config.yaml openshift-install create cluster --dir hack/ --log-level warn + - + name: Install operator-sdk + run: | + make operator-sdk + - + name: Install preflight + run: | + make preflight + - + name: Create Secret + run: | + export KUBECONFIG=$(pwd)/hack/auth/kubeconfig + oc create ns cloudnative-pg + oc -n cloudnative-pg create secret generic cnpg-pull-secret \ + --from-file=.dockerconfigjson=$HOME/.docker/config.json \ + --type=kubernetes.io/dockerconfigjson + - + name: Run preflight operator test + env: + BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} + PFLT_INDEXIMAGE: ${{ needs.buildx.outputs.index_img }} + PFLT_SCORECARD_WAIT_TIME: "1200" + PFLT_ARTIFACTS: "preflight_operator_results" + run: | + PATH=$(pwd)/bin/:${PATH} \ + KUBECONFIG=$(pwd)/hack/auth/kubeconfig \ + bin/preflight check operator ${BUNDLE_IMG} \ + --docker-config $HOME/.docker/config.json --loglevel trace + - + name: Check preflight operator results + run: | + PASS=`jq -r .passed preflight_operator_results/results.json` + if [[ "$PASS" == "false" ]] + then + exit 1 + fi - name: Run E2E tests if: (always() && !cancelled()) @@ -2027,7 +2054,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -2052,7 +2079,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -2062,7 +2089,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -2107,13 +2134,13 @@ jobs: needs.e2e-openshift.result == 'success' || needs.e2e-openshift.result == 'failure' )) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Create a directory for the artifacts run: mkdir test-artifacts - name: Download all artifacts to the directory - uses: actions/download-artifact@v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: path: test-artifacts pattern: testartifacts-* @@ -2131,13 +2158,13 @@ jobs: - name: Compute the E2E test summary id: generate-summary - uses: cloudnative-pg/ciclops@v1.3.0 + uses: cloudnative-pg/ciclops@f5a7b357a09f09052ec0358ac49e020f151f1653 # v1.3.1 with: artifact_directory: test-artifacts/data - name: If there is an overflow summary, archive it if: steps.generate-summary.outputs.Overflow - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: ${{ steps.generate-summary.outputs.Overflow }} path: ${{ steps.generate-summary.outputs.Overflow }} @@ -2146,7 +2173,7 @@ jobs: - name: Send the Ciclops view over Slack # Send the Ciclops thermometer on every scheduled run on `main`. # or when there are systematic failures in release branches - uses: rtCamp/action-slack-notify@v2 + uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2 if: | github.repository_owner == env.REPOSITORY_OWNER && ( @@ -2190,7 +2217,7 @@ jobs: needs.e2e-local.result == 'success' && github.event_name == 'issue_comment' && needs.evaluate_options.outputs.test_level == '4' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check preconditions id: get_pr_number_and_labels @@ -2203,7 +2230,7 @@ jobs: - name: Label the PR as "ok to merge :ok_hand:" if: | env.OK_LABEL == '' - uses: actions-ecosystem/action-add-labels@v1.1.3 + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1.1.3 with: github_token: ${{ secrets.REPO_GHA_PAT }} number: ${{ github.event.issue.number }} @@ -2219,7 +2246,7 @@ jobs: always() && needs.e2e-local.result == 'failure' && github.event_name == 'issue_comment' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check preconditions id: get_pr_number_and_labels @@ -2232,7 +2259,7 @@ jobs: - name: Remove "ok to merge :ok_hand:" label from PR if: | env.OK_LABEL != '' - uses: actions-ecosystem/action-remove-labels@v1.3.0 + uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 with: github_token: ${{ secrets.REPO_GHA_PAT }} number: ${{ github.event.issue.number }} diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 427ccc631e..0ef7e20e3d 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -14,12 +14,17 @@ on: schedule: - cron: '0 1 * * *' +permissions: read-all + # set up environment variables to be used across all the jobs env: - GOLANG_VERSION: "1.23.x" - GOLANGCI_LINT_VERSION: "v1.61.0" + # renovate: datasource=golang-version depName=golang versioning=loose + GOLANG_VERSION: "1.25.0" + # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose + GOLANGCI_LINT_VERSION: "v2.4.0" KUBEBUILDER_VERSION: "2.3.1" - KIND_VERSION: "v0.24.0" + # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver + KIND_VERSION: "v0.30.0" 
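+  # The "# renovate: ..." comments above are directives for Renovate's regex manager:
+  # it captures the pinned value on the line that follows each comment and opens a PR
+  # whenever the named datasource publishes a newer version. A minimal sketch of such a
+  # manager in renovate.json (assumed configuration, shown for illustration; the
+  # repository's real settings may differ):
+  #   "customManagers": [{
+  #     "customType": "regex",
+  #     "fileMatch": ["^\\.github/workflows/.+\\.ya?ml$"],
+  #     "matchStrings": [
+  #       "# renovate: datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)(?: versioning=(?<versioning>\\S+))?\\s+\\S+: \"?(?<currentValue>[^\"\\s]+)\"?"
+  #     ]
+  #   }]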
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" API_DOC_NAME: "cloudnative-pg.v1.md" SLACK_USERNAME: "cnpg-bot" @@ -30,11 +35,12 @@ env: BUILD_PUSH_CACHE_FROM: "" BUILD_PUSH_CACHE_TO: "" BUILD_PLUGIN_RELEASE_ARGS: "build --skip=validate --clean --id kubectl-cnpg --timeout 60m" - BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager" + BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager-race" REPOSITORY_OWNER: "cloudnative-pg" REGISTRY: "ghcr.io" REGISTRY_USER: ${{ github.actor }} REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }} + SIGN_IMAGES: "true" OPP_SCRIPT_URL: "https://raw.githubusercontent.com/redhat-openshift-ecosystem/community-operators-pipeline/ci/latest/ci/scripts/opp.sh" defaults: @@ -46,17 +52,18 @@ jobs: # Trigger the workflow on release-* branches for smoke testing whenever it's a scheduled run. # Note: this is a workaround since we can't directly schedule-run a workflow from a non default branch smoke_test_release_branches: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + actions: write name: smoke test release-* branches when it's a scheduled run if: github.event_name == 'schedule' strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.23, release-1.24] - + branch: [release-1.25, release-1.26, release-1.27] steps: - name: Invoke workflow with inputs - uses: benc-uk/workflow-dispatch@v1 + uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 with: workflow: continuous-integration ref: ${{ matrix.branch }} @@ -65,14 +72,14 @@ jobs: # 1. it's on 'main' branch # 2. it's triggered by events in the 'do_not_skip' list duplicate_runs: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 name: Skip duplicate runs continue-on-error: true outputs: should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }} steps: - id: skip_check - uses: fkirc/skip-duplicate-actions@v5.3.1 + uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1 with: concurrent_skipping: 'same_content' skip_after_successful_duplicate: 'true' @@ -86,7 +93,7 @@ jobs: name: Check changed files needs: duplicate_runs if: ${{ needs.duplicate_runs.outputs.should_skip != 'true' }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: docs-changed: ${{ steps.filter.outputs.docs-changed }} operator-changed: ${{ steps.filter.outputs.operator-changed }} @@ -96,10 +103,10 @@ jobs: renovate-changed: ${{ steps.filter.outputs.renovate-changed }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Check for changes - uses: dorny/paths-filter@v3.0.2 + uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 id: filter # Remember to add new folders in the operator-changed filter if needed with: @@ -120,8 +127,7 @@ jobs: - '.github/workflows/continuous-integration.yml' - '.goreleaser*.yml' - 'Dockerfile' - - 'Dockerfile-ubi8' - - 'Dockerfile-ubi9' + - 'docker-bake.hcl' - 'Makefile' - 'go.mod' - 'go.sum' @@ -145,13 +151,13 @@ jobs: - change-triage # We need always run linter as go linter is a required check if: needs.duplicate_runs.outputs.should_skip != 'true' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: # Disable 
setup-go caching. Cache is better handled by the golangci-lint action cache: false @@ -159,7 +165,7 @@ jobs: check-latest: true - name: Run golangci-lint - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8 with: version: ${{ env.GOLANGCI_LINT_VERSION }} @@ -175,13 +181,13 @@ jobs: if: | needs.duplicate_runs.outputs.should_skip != 'true' && needs.change-triage.outputs.renovate-changed == 'true' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Validate Renovate JSON - run: npx --yes --package renovate -- renovate-config-validator + run: npx --yes --package renovate@40.48.6 -- renovate-config-validator go-vulncheck: name: Run govulncheck @@ -194,10 +200,10 @@ jobs: needs.change-triage.outputs.operator-changed == 'true' || needs.change-triage.outputs.go-code-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Run govulncheck - uses: golang/govulncheck-action@v1 + uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # v1 with: go-version-input: ${{ env.GOLANG_VERSION }} check-latest: true @@ -211,15 +217,15 @@ jobs: if: | needs.duplicate_runs.outputs.should_skip != 'true' && needs.change-triage.outputs.shell-script-changed == 'true' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: SHELLCHECK_OPTS: -a -S style steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Run ShellCheck - uses: ludeeus/action-shellcheck@2.0.0 + uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 generate-unit-tests-jobs: name: Generate jobs for unit tests @@ -233,13 +239,13 @@ jobs: needs.change-triage.outputs.operator-changed == 'true' || needs.change-triage.outputs.go-code-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: k8sMatrix: ${{ steps.get-k8s-versions.outputs.k8s_versions }} latest_k8s_version: ${{ steps.get-k8s-versions.outputs.latest_k8s_version }} steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Get k8s versions for unit test id: get-k8s-versions @@ -272,17 +278,17 @@ jobs: needs.change-triage.outputs.operator-changed == 'true' || needs.change-triage.outputs.go-code-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: matrix: # The Unit test is performed per multiple supported k8s versions (each job for each k8s version) as below: k8s-version: ${{ fromJSON(needs.generate-unit-tests-jobs.outputs.k8sMatrix) }} steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -315,13 +321,13 @@ jobs: needs.change-triage.outputs.go-code-changed == 'true' || needs.change-triage.outputs.docs-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ 
-350,13 +356,13 @@ jobs: needs.change-triage.outputs.go-code-changed == 'true' || needs.change-triage.outputs.operator-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -403,35 +409,38 @@ jobs: (needs.tests.result == 'success' || needs.tests.result == 'skipped') && (needs.apidoc.result == 'success' || needs.apidoc.result == 'skipped') && (needs.crd.result == 'success' || needs.crd.result == 'skipped') - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 permissions: actions: read contents: read packages: write security-events: write + id-token: write outputs: commit_version: ${{ env.VERSION }} commit: ${{ env.COMMIT_SHA }} controller_img: ${{ env.CONTROLLER_IMG }} - controller_img_ubi8: ${{ env.CONTROLLER_IMG_UBI8 }} + controller_img_ubi: ${{ env.CONTROLLER_IMG_UBI }} bundle_img: ${{ env.BUNDLE_IMG }} catalog_img: ${{ env.CATALOG_IMG }} push: ${{ env.PUSH }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Build meta id: build-meta + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | commit_sha=${{ github.event.pull_request.head.sha || github.sha }} commit_date=$(git log -1 --pretty=format:'%ad' --date short "${commit_sha}" || : ) @@ -442,10 +451,22 @@ jobs: # shortened commit sha commit_short=$(git rev-parse --short "${commit_sha}") + # extract branch name + branch_name=${GITHUB_REF#refs/heads/} + if [[ ${{ github.event_name }} == 'pull_request' ]] + then + branch_name=$(gh pr view "${{ github.event.pull_request.number }}" --json headRefName -q '.headRefName' 2>/dev/null) + fi + + # extract tag from branch name + tag_name=$(echo "$branch_name" | tr / -) + echo "DATE=${commit_date}" >> $GITHUB_ENV echo "VERSION=${commit_version}" >> $GITHUB_ENV echo "COMMIT=${commit_short}" >> $GITHUB_ENV echo "COMMIT_SHA=${commit_sha}" >> $GITHUB_ENV + echo "IMAGE_TAG=${tag_name,,}" >> $GITHUB_ENV + echo "REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV # By default the container image is being pushed to the registry echo "PUSH=true" >> $GITHUB_ENV @@ -470,7 +491,7 @@ jobs: echo PWD=$(pwd) >> $GITHUB_ENV - name: Run GoReleaser to build kubectl plugin - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 if: | github.event_name == 'schedule' || ( @@ -490,7 +511,7 @@ jobs: # Send Slack notification if the kubectl plugin build fails. # To avoid message overflow, we only report runs scheduled on main or release branches - name: Slack Notification - uses: rtCamp/action-slack-notify@v2 + uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2 if: | failure() && github.repository_owner == env.REPOSITORY_OWNER && @@ -510,7 +531,7 @@ jobs: SLACK_MESSAGE: Building kubernetes plugin failed! 
- name: Run GoReleaser - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 with: distribution: goreleaser version: v2 @@ -519,190 +540,114 @@ jobs: DATE: ${{ env.DATE }} COMMIT: ${{ env.COMMIT }} VERSION: ${{ env.VERSION }} - - - name: Docker meta - id: docker-meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.OPERATOR_IMAGE_NAME }} - tags: | - type=ref,event=branch - type=ref,event=pr - - - name: Docker meta UBI8 - id: docker-meta-ubi8 - uses: docker/metadata-action@v5 - with: - images: ${{ env.OPERATOR_IMAGE_NAME }} - flavor: | - suffix=-ubi8 - tags: | - type=ref,event=branch - type=ref,event=pr - - - name: Docker meta UBI9 - id: docker-meta-ubi9 - uses: docker/metadata-action@v5 - with: - images: ${{ env.OPERATOR_IMAGE_NAME }} - flavor: | - suffix=-ubi9 - tags: | - type=ref,event=branch - type=ref,event=pr + RACE: "true" - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} + cache-image: false - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - - name: Build for scan distroless image - uses: docker/build-push-action@v6 - with: - platforms: "linux/amd64" - context: . - file: Dockerfile - push: false - load: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta.outputs.tags }} - - - name: Dockle scan distroless image - uses: erzz/dockle-action@v1 - with: - image: ${{ steps.docker-meta.outputs.tags }} - exit-code: '1' - failure-threshold: WARN - accept-keywords: key - - - name: Build for scan UBI8 image - uses: docker/build-push-action@v6 + - name: Build and push + uses: docker/bake-action@3acf805d94d93a86cce4ca44798a76464a75b88c # v6 + id: bake-push + env: + environment: "testing" + buildVersion: ${{ env.VERSION }} + tag: ${{ env.IMAGE_TAG }} + registry: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }} + revision: ${{ env.COMMIT }} with: - platforms: "linux/amd64" - context: . - file: Dockerfile-ubi8 - push: false - load: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi8.outputs.tags }} + source: . + push: ${{ env.PUSH }} + no-cache: true + targets: "default" - - name: Dockle scan UBI8 image - uses: erzz/dockle-action@v1 + - name: Output images + if: env.PUSH == 'true' env: - DOCKLE_IGNORES: CIS-DI-0009 + DISTROLESS: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['image.name'] }} + UBI: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }} + run: | + echo "CONTROLLER_IMG=${DISTROLESS}" >> $GITHUB_ENV + echo "CONTROLLER_IMG_UBI=${UBI}" >> $GITHUB_ENV + echo "BUNDLE_IMG=${UBI}-bundle" >> $GITHUB_ENV + echo "CATALOG_IMG=${UBI}-catalog" >> $GITHUB_ENV + + - name: Dockle scan distroless image + uses: erzz/dockle-action@69369bc745ee29813f730231a821bcd4f71cd290 # v1 + if: env.PUSH == 'true' with: - image: ${{ steps.docker-meta-ubi8.outputs.tags }} + image: ${{ env.CONTROLLER_IMG }} exit-code: '1' failure-threshold: WARN accept-keywords: key - - name: Build for scan UBI9 image - uses: docker/build-push-action@v6 - with: - platforms: "linux/amd64" - context: . 
- file: Dockerfile-ubi9 - push: false - load: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi9.outputs.tags }} - - - name: Dockle scan UBI9 image - uses: erzz/dockle-action@v1 + - name: Dockle scan UBI image + uses: erzz/dockle-action@69369bc745ee29813f730231a821bcd4f71cd290 # v1 + if: env.PUSH == 'true' env: DOCKLE_IGNORES: CIS-DI-0009 with: - image: ${{ steps.docker-meta-ubi9.outputs.tags }} + image: ${{ env.CONTROLLER_IMG_UBI }} exit-code: '1' failure-threshold: WARN accept-keywords: key - name: Run Snyk to check Docker image for vulnerabilities - uses: snyk/actions/docker@master + uses: snyk/actions/docker@e2221410bff24446ba09102212d8bc75a567237d # master if: | - !github.event.repository.fork && + !github.event.repository.fork && !github.event.pull_request.head.repo.fork continue-on-error: true env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} with: - image: ${{ steps.docker-meta.outputs.tags }} + image: ${{ env.CONTROLLER_IMG }} args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 if: | - !github.event.repository.fork && + !github.event.repository.fork && !github.event.pull_request.head.repo.fork continue-on-error: true with: sarif_file: snyk.sarif - - name: Build and push - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile - push: ${{ env.PUSH }} - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta.outputs.tags }} - provenance: ${{ env.BUILD_PUSH_PROVENANCE }} - cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} - cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} - - - name: Build and push UBI8 - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile-ubi8 - push: ${{ env.PUSH }} - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi8.outputs.tags }} - - - name: Build and push UBI9 - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile-ubi9 - push: ${{ env.PUSH }} - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi9.outputs.tags }} - - - name: Output images - env: - TAGS: ${{ steps.docker-meta.outputs.tags }} - TAGS_UBI8: ${{ steps.docker-meta-ubi8.outputs.tags }} + - name: Install cosign + if: | + env.SIGN_IMAGES == 'true' && + env.PUSH == 'true' + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3 + # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ + # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on + # how to use cosign. 
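+          # A sketch of what the jq filter in the "Sign images" step below does to the
+          # bake metadata, shown on an assumed single-target payload:
+          #   echo '{"t":{"image.name":"ghcr.io/acme/op:pr-1,ghcr.io/acme/op:latest","containerimage.digest":"sha256:abc123"}}' \
+          #     | jq -r '.[] | (."image.name" | sub(",.*";"")) + "@" + ."containerimage.digest"'
+          #   # -> ghcr.io/acme/op:pr-1@sha256:abc123
+          # i.e. the first tag of each target, pinned to its digest, which is what cosign signs.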
+ + - name: Sign images + if: | + env.SIGN_IMAGES == 'true' && + env.PUSH == 'true' run: | - LOWERCASE_OPERATOR_IMAGE_NAME=${OPERATOR_IMAGE_NAME,,} - TAG=${TAGS#*:} - TAG_UBI=${TAGS_UBI8#*:} - echo "CONTROLLER_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG}" >> $GITHUB_ENV - echo "CONTROLLER_IMG_UBI8=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV - echo "BUNDLE_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:bundle-${TAG}" >> $GITHUB_ENV - echo "CATALOG_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:catalog-${TAG}" >> $GITHUB_ENV + images=$(echo '${{ steps.bake-push.outputs.metadata }}' | + jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' + ) + cosign sign --yes ${images} + olm-bundle: name: Create OLM bundle and catalog - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 permissions: contents: read packages: write @@ -714,27 +659,28 @@ jobs: needs.buildx.outputs.push == 'true' steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: fetch-depth: 0 ref: ${{ needs.buildx.outputs.commit }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} + cache-image: false - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -742,14 +688,14 @@ jobs: - name: Create bundle env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi8 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi }} BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }} run: | make olm-catalog - name: Archive the bundle manifests - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: bundle path: | @@ -760,7 +706,7 @@ jobs: preflight: name: Run openshift-preflight test - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - buildx - olm-bundle @@ -769,10 +715,10 @@ jobs: needs.olm-bundle.result == 'success' steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -782,7 +728,7 @@ jobs: make operator-sdk preflight - name: Login to container registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -790,14 +736,14 @@ jobs: - name: Run preflight container test env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi8 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi }} PFLT_ARTIFACTS: "preflight_results" run: | bin/preflight check container ${CONTROLLER_IMG} \ --docker-config $HOME/.docker/config.json - name: Archive the preflight results - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: preflight_results path: | @@ -816,7 +762,7 @@ jobs: olm-scorecard: name: Run OLM scorecard test - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - buildx - olm-bundle @@ -826,31 +772,32 @@ jobs: github.repository_owner == 'cloudnative-pg' steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Setting up KinD cluster - uses: helm/kind-action@v1.10.0 + uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 with: wait: "600s" version: ${{ env.KIND_VERSION }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} + cache-image: false - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -867,7 +814,7 @@ jobs: matrix: test: [ kiwi, lemon, orange ] name: Run OLM ${{ matrix.test }} test - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - buildx - olm-bundle @@ -883,20 +830,20 @@ jobs: OPP_RELEASE_INDEX_NAME: "catalog_tmp" steps: - name: Checkout community-operators - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: repository: k8s-operatorhub/community-operators persist-credentials: false - name: Login into docker registry - uses: redhat-actions/podman-login@v1 + uses: redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603 # v1 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Download the bundle - uses: actions/download-artifact@v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: name: bundle diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index fd5a05ca2c..7da5b855e6 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -16,10 +16,7 @@ on: description: 'Limit to the specified engines list (eks, aks, gke, kind, ocp)' required: false -permissions: - contents: write - pull-requests: write - issues: read +permissions: read-all defaults: run: @@ -33,23 +30,23 @@ env: jobs: check-public-clouds-k8s-versions: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - # There is no command to get EKS k8s versions, we have to parse the documentation name: Get updated EKS versions run: | - DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/main/doc_source/kubernetes-versions.md" - curl --silent "${DOC_URL}" | grep -E '^\+ `[0-9]\.[0-9]{2}`$' | sed -e 's/[\ +`]//g' | \ + DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/mainline/latest/ug/versioning/kubernetes-versions-standard.adoc" + curl --silent "${DOC_URL}" | sed -e 's/.*Kubernetes 
\([0-9].[0-9][0-9]\).*/\1/;/^[0-9]\./!d' | uniq | \ awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \ jq -Rn '[inputs]' | tee .github/eks_versions.json if: github.event.inputs.limit == null || github.event.inputs.limit == 'eks' - name: Azure Login - uses: azure/login@v2.2.0 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks' @@ -57,26 +54,26 @@ jobs: name: Get updated AKS versions run: | az aks get-versions --location westeurope \ - --query 'reverse(sort(values[? isPreview != `true`].patchVersions.keys(@)[]))' -o tsv | \ + --query "reverse(sort(values[? isPreview != 'true' && contains(capabilities.supportPlan, 'KubernetesOfficial')].patchVersions.keys(@)[]))" -o tsv | \ sort -urk 1,1.5 | \ awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \ jq -Rn '[inputs]' | tee .github/aks_versions.json if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks' - name: 'Auth GKE' - uses: 'google-github-actions/auth@v2' + uses: 'google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093' # v3 with: credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}' if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' - name: Set up Cloud SDK for GKE - uses: google-github-actions/setup-gcloud@v2 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3 with: project_id: ${{ secrets.GCP_PROJECT_ID }} if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' - name: Install YQ - uses: frenck/action-setup-yq@v1 + uses: frenck/action-setup-yq@c4b5be8b4a215c536a41d436757d9feb92836d4f # v1 if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' - name: Get updated GKE versions run: | @@ -109,8 +106,10 @@ jobs: - name: Get updated OpenShift versions run: | + # The [24-9] class below matches 4.12 and 4.14 through 4.19, skipping the EOL 4.13 release; + # update it again once 4.15 also goes EOL curl -s https://mirror.openshift.com/pub/openshift-v4/clients/ocp/ | \ - grep -e 'href.*"4\.1[2-9]\.[0-9].*"' | \ + grep -e 'href.*"4\.1[24-9]\.[0-9].*"' | \ sed -e 's/\(.*\)href="\(4\.1[2-9]\)\(.*\)/\2/' | \ sort -Vru | \ awk -vv="$MINIMAL_OCP" '$0>=v {print $0}' | \ @@ -121,7 +120,7 @@ jobs: if: github.event.inputs.limit == null || github.event.inputs.limit == 'ocp' - name: Create Pull Request if versions have been updated - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 with: token: ${{ secrets.REPO_GHA_PAT }} title: "feat: Public Cloud K8S versions update" diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index 88e57afde5..29021732b8 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -8,25 +8,27 @@ on: - cron: "30 0 * * *" workflow_dispatch: +permissions: read-all + defaults: run: shell: "bash -Eeuo pipefail -x {0}" jobs: check-latest-postgres-version: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Set up Python 3.9 - uses: actions/setup-python@v5 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: - python-version: 3.9 + python-version: 3.13 - name: Install Python dependencies run: | - pip install 
packaging==25.0 - name: Generate PostgreSQL JSON files run: | @@ -67,10 +69,9 @@ jobs: - name: Create PR to update PostgreSQL version if: env.LATEST_POSTGRES_VERSION_IMAGE != env.CURRENT_POSTGRES_VERSION_IMAGE - uses: peter-evans/create-pull-request@v7 - env: - GITHUB_TOKEN: ${{ secrets.REPO_GHA_PAT }} + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 with: + token: ${{ secrets.REPO_GHA_PAT }} title: "feat: update default PostgreSQL version to ${{ env.LATEST_POSTGRES_VERSION }}" body: "Update default PostgreSQL version from ${{ env.CURRENT_POSTGRES_VERSION }} to ${{ env.LATEST_POSTGRES_VERSION }}" branch: "postgres-versions-update" @@ -80,10 +81,9 @@ jobs: - name: Create Pull Request if postgresql versions have been updated if: env.LATEST_POSTGRES_VERSION_IMAGE == env.CURRENT_POSTGRES_VERSION_IMAGE - uses: peter-evans/create-pull-request@v7 - env: - GITHUB_TOKEN: ${{ secrets.REPO_GHA_PAT }} + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 with: + token: ${{ secrets.REPO_GHA_PAT }} title: "test: Updated Postgres versions used in E2E tests" body: "Update the Postgres versions used in E2E tests" branch: "postgres-versions-update" diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml new file mode 100644 index 0000000000..be3454df15 --- /dev/null +++ b/.github/workflows/ossf_scorecard.yml @@ -0,0 +1,79 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + workflow_dispatch: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '17 9 * * 1' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + # `publish_results: true` only works when run from the default branch. conditional can be removed if disabled. + if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request' + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. 
+ # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore + # file_mode: git + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard (optional). + # Commenting out will disable upload of results to your repo's Code Scanning dashboard + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 + with: + sarif_file: results.sarif diff --git a/.github/workflows/pr_verify_linked_issue.yml b/.github/workflows/pr_verify_linked_issue.yml index 6cc10ece1c..a0a32f4e33 100644 --- a/.github/workflows/pr_verify_linked_issue.yml +++ b/.github/workflows/pr_verify_linked_issue.yml @@ -15,6 +15,8 @@ on: - labeled - unlabeled +permissions: read-all + jobs: verify_linked_issue: runs-on: ubuntu-latest @@ -22,8 +24,6 @@ jobs: if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-issue') }} steps: - name: Verify Linked Issue - uses: hattan/verify-linked-issue-action@v1.1.5 + uses: hattan/verify-linked-issue-action@2d8e2e47a462cc7b07ba5e6cab6f9d57bd36672e # v1.1.5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml new file mode 100644 index 0000000000..202e176cb4 --- /dev/null +++ b/.github/workflows/refresh-licenses.yml @@ -0,0 +1,44 @@ +# Refresh the "licenses" directory and create a PR if there are any changes + +name: Refresh licenses directory +on: + workflow_dispatch: + schedule: + - cron: "30 0 * * 1" + +permissions: read-all + +env: + # renovate: datasource=golang-version depName=golang versioning=loose + GOLANG_VERSION: "1.25.0" + +jobs: + licenses: + name: Refresh licenses + runs-on: ubuntu-24.04 + steps: + - name: Checkout + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + + - name: Install Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 + with: + go-version: ${{ env.GOLANG_VERSION }} + check-latest: true + + - name: Generate licenses + run: | + make licenses + + - name: Create Pull Request if licenses have been updated + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 + with: + token: ${{ secrets.REPO_GHA_PAT }} + title: "chore: refresh licenses directory" + body: "Refresh the licenses directory" + branch: "license-updater" + author: "license-updater " + add-paths: | + licenses/** + commit-message: "chore: refresh licenses directory" + signoff: true diff --git a/.github/workflows/registry-clean.yml b/.github/workflows/registry-clean.yml index c57ff2a859..ac3df3175d 100644 --- a/.github/workflows/registry-clean.yml +++ b/.github/workflows/registry-clean.yml @@ -9,34 +9,32 @@ on: env: IMAGE_NAME: "cloudnative-pg-testing" - ORG_NAME: "cloudnative-pg" - SNOK_TOKEN: ${{ 
secrets.REPO_GHA_PAT }} CONTAINER_IMAGE_NAMES: "pgbouncer-testing, postgresql-testing, postgis-testing" +permissions: read-all + jobs: clean-ghcr: name: delete old testing container images + permissions: + packages: write runs-on: ubuntu-latest steps: - # once issue https://github.com/snok/container-retention-policy/issues/33 is fixed - # we can merge the two steps into one - - name: Delete '-testing' images for ${{ env.IMAGE_NAME }} - uses: snok/container-retention-policy@v2 + - name: Delete '-testing' operator images in ${{ env.IMAGE_NAME }} + uses: snok/container-retention-policy@4f22ef80902ad409ed55a99dc5133cc1250a0d03 # v3.0.0 with: image-names: ${{ env.IMAGE_NAME }} - cut-off: 5 days ago UTC - keep-at-least: 1 - account-type: org - org-name: ${{ env.ORG_NAME }} - # use the GITHUB_TOKEN when issue https://github.com/snok/container-retention-policy/issues/27 is fixed - token: ${{ env.SNOK_TOKEN }} - - name: Delete '-testing' images for containers - uses: snok/container-retention-policy@v2 + cut-off: 5d + keep-n-most-recent: 1 + account: ${{ github.repository_owner }} + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Delete '-testing' operand images + uses: snok/container-retention-policy@4f22ef80902ad409ed55a99dc5133cc1250a0d03 # v3.0.0 + if: ${{ github.repository_owner == 'cloudnative-pg' }} with: image-names: ${{ env.CONTAINER_IMAGE_NAMES }} - cut-off: A week ago UTC - keep-at-least: 1 - account-type: org - org-name: ${{ env.ORG_NAME }} - # use the GITHUB_TOKEN when issue https://github.com/snok/container-retention-policy/issues/27 is fixed - token: ${{ env.SNOK_TOKEN }} + cut-off: 1w + keep-n-most-recent: 1 + account: "cloudnative-pg" + token: ${{ secrets.REPO_GHA_PAT }} diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index d5ad3a7278..70644b5b61 100644 --- a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -2,6 +2,8 @@ name: release-pr +permissions: read-all + on: push: branches: @@ -9,11 +11,14 @@ on: jobs: pull-request: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + pull-requests: write + contents: write steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Get tag run: | @@ -28,7 +33,7 @@ jobs: - name: Pull Request id: open-pr - uses: repo-sync/pull-request@v2.12 + uses: repo-sync/pull-request@572331753c3787dee4a6c0b6719c889af9646b81 # v2.12 with: destination_branch: ${{ env.DEST }} github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index ae6ee87f8a..b71b43bd0a 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -7,26 +7,25 @@ on: tags: - v* -env: - GOLANG_VERSION: "1.23.x" - CNPG_IMAGE_NAME: "ghcr.io/${{ github.repository }}" +permissions: read-all -permissions: - contents: write - packages: write +env: + # renovate: datasource=golang-version depName=golang versioning=loose + GOLANG_VERSION: "1.25.0" + REGISTRY: "ghcr.io" jobs: check-version: name: Evaluate release tag - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: is_latest: ${{ env.IS_LATEST }} is_stable: ${{ env.IS_STABLE }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: # To identify the commit we need the history and all the tags. 
fetch-depth: 0 @@ -49,13 +48,15 @@ jobs: release: name: Create Github release - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + contents: write needs: - check-version steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Get tag run: | @@ -74,7 +75,7 @@ jobs: /src/docs/src/${{ env.FILE }} - name: Release - uses: softprops/action-gh-release@v2 + uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2 with: body_path: release_notes.md draft: false @@ -87,25 +88,29 @@ jobs: release-binaries: name: Build containers - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + packages: write + contents: write + id-token: write needs: - check-version outputs: - version: ${{ steps.build-meta.outputs.version }} + version: ${{ env.IMAGE_TAG }} author_name: ${{ steps.build-meta.outputs.author_name }} author_email: ${{ steps.build-meta.outputs.author_email }} - digest: ${{ steps.build.outputs.digest }} platforms: ${{ env.PLATFORMS }} + olm_img: ${{ steps.olm-image.outputs.olm_image }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -113,10 +118,9 @@ jobs: name: Build meta id: build-meta run: | - images='ghcr.io/cloudnative-pg/cloudnative-pg' - images="${images},ghcr.io/cloudnative-pg/cloudnative-pg-testing" commit_sha=${{ github.sha }} commit_date=$(git log -1 --pretty=format:'%ad' --date short "${commit_sha}") + tag="${GITHUB_REF#refs/tags/v}" # get git user and email author_name=$(git show -s --format='%an' "${commit_sha}") @@ -125,16 +129,18 @@ jobs: # use git describe to get the nearest tag and use that to build the version (e.g. 
1.4.0-dev24 or 1.4.0) commit_version=$(git describe --tags --match 'v*' "${commit_sha}"| sed -e 's/^v//; s/-g[0-9a-f]\+$//; s/-\([0-9]\+\)$/-dev\1/') commit_short=$(git rev-parse --short "${commit_sha}") - echo "IMAGES=${images}" >> $GITHUB_ENV + echo "DATE=${commit_date}" >> $GITHUB_ENV - echo "version=${commit_version}" >> $GITHUB_OUTPUT + echo "VERSION=${commit_version}" >> $GITHUB_ENV + echo "IMAGE_TAG=${tag}" >> $GITHUB_ENV + echo "REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV echo "COMMIT=${commit_short}" >> $GITHUB_ENV echo "author_name=${author_name}" >> $GITHUB_OUTPUT echo "author_email=${author_email}" >> $GITHUB_OUTPUT - name: Import GPG key id: import_gpg - uses: crazy-max/ghaction-import-gpg@v6 + uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6 with: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} passphrase: ${{ secrets.GPG_PASSPHRASE }} @@ -148,7 +154,7 @@ jobs: echo "$GPG_PRIVATE_KEY" > gpg_signing_key.asc - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 with: distribution: goreleaser version: v2 @@ -156,7 +162,7 @@ jobs: env: DATE: ${{ env.DATE }} COMMIT: ${{ env.COMMIT }} - VERSION: ${{ steps.build-meta.outputs.version }} + VERSION: ${{ env.VERSION }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} NFPM_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} @@ -165,43 +171,9 @@ jobs: if: | needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' - uses: rajatjindal/krew-release-bot@v0.0.46 + uses: rajatjindal/krew-release-bot@3d9faef30a82761d610544f62afddca00993eef9 # v0.0.47 with: krew_template_file: dist/krew/cnpg.yaml - - - name: Docker meta - id: docker-meta - uses: docker/metadata-action@v5 - env: - IS_LATEST: ${{ needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' }} - with: - images: ${{ env.IMAGES }} - flavor: | - latest=${{ env.IS_LATEST }} - tags: | - type=semver,pattern={{version}} - - - name: Docker meta UBI8 - id: docker-meta-ubi8 - uses: docker/metadata-action@v5 - with: - images: ${{ env.IMAGES }} - flavor: | - latest=false - suffix=-ubi8 - tags: | - type=semver,pattern={{version}} - - - name: Docker meta UBI9 - id: docker-meta-ubi9 - uses: docker/metadata-action@v5 - with: - images: ${{ env.IMAGES }} - flavor: | - latest=false - suffix=-ubi9 - tags: | - type=semver,pattern={{version}} - name: Detect platforms run: | @@ -211,56 +183,65 @@ jobs: echo "PLATFORMS=${platforms}" >> $GITHUB_ENV - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login to ghcr.io - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: - registry: ghcr.io + registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/bake-action@3acf805d94d93a86cce4ca44798a76464a75b88c # v6 + id: bake-push + env: + environment: "production" + buildVersion: ${{ env.VERSION }} + tag: ${{ env.IMAGE_TAG }} + registry: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }} + revision: ${{ env.COMMIT }} + latest: ${{ 
needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' }} with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile + source: . push: true - build-args: | - VERSION=${{ steps.build-meta.outputs.version }} - tags: ${{ steps.docker-meta.outputs.tags }} + no-cache: true + targets: "default" - - name: Build and push UBI8 - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile-ubi8 - push: true - build-args: | - VERSION=${{ steps.build-meta.outputs.version }} - tags: ${{ steps.docker-meta-ubi8.outputs.tags }} + name: Install cosign + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3 + # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ + # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on + # how to use cosign. - - name: Build and push UBI9 - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile-ubi9 - push: true - build-args: | - VERSION=${{ steps.build-meta.outputs.version }} - tags: ${{ steps.docker-meta-ubi9.outputs.tags }} + name: Sign images + run: | + images=$(echo '${{ steps.bake-push.outputs.metadata }}' | + jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' + ) + cosign sign --yes ${images} + - + # Bake returns all the tags for a target's variant in a comma separated list. + # We only care about a single tag for OLM, so we remove the "latest" tag and + # pick the first entry in order from what's left in the list + name: Image for OLM + id: olm-image + env: + ubi_tags: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }} + run: | + echo "olm_image=$(echo "$ubi_tags" | tr ',' '\n' | grep -v 'latest' | sed 's/^ *//g' | head -n 1)" >> $GITHUB_OUTPUT olm-bundle: name: Create OLM bundle and catalog - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + contents: write + packages: write needs: - check-version - release-binaries @@ -271,44 +252,41 @@ jobs: needs.check-version.outputs.is_stable == 'true' steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: fetch-depth: 0 - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ needs.release-binaries.outputs.platforms }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login to ghcr.io - uses: docker/login-action@v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: - registry: ghcr.io + registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set bundle variables + env: + OLM_IMG: ${{ needs.release-binaries.outputs.olm_img }} run: | - tag="${GITHUB_REF#refs/tags/v}" - version="${tag#v}" - LOWERCASE_CNPG_IMAGE_NAME=${CNPG_IMAGE_NAME,,} - echo "IMAGE_NAME=${LOWERCASE_CNPG_IMAGE_NAME}" >> $GITHUB_ENV - echo "CONTROLLER_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:${version}-ubi8" >> $GITHUB_ENV - echo "BUNDLE_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:bundle-${version}" >> $GITHUB_ENV 
- echo "CATALOG_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:catalog-${version}" >> $GITHUB_ENV + echo "CONTROLLER_IMG=${OLM_IMG}" >> $GITHUB_ENV + echo "BUNDLE_IMG=${OLM_IMG}-bundle" >> $GITHUB_ENV + echo "CATALOG_IMG=${OLM_IMG}-catalog" >> $GITHUB_ENV - name: Create bundle env: - IMAGE_NAME: ${{ env.IMAGE_NAME }} CONTROLLER_IMG: ${{ env.CONTROLLER_IMG }} BUNDLE_IMG: ${{ env.BUNDLE_IMG }} CATALOG_IMG: ${{ env.CATALOG_IMG }} @@ -316,7 +294,7 @@ jobs: make olm-catalog - name: Archive the bundle manifests - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: bundle path: | @@ -326,7 +304,7 @@ jobs: operatorhub_pr: name: Create remote PR for OperatorHub - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - release-binaries - olm-bundle @@ -337,14 +315,14 @@ jobs: VERSION: ${{ needs.release-binaries.outputs.version }} steps: - name: Checkout community-operators - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: repository: k8s-operatorhub/community-operators fetch-depth: 0 persist-credentials: false - name: Download the bundle - uses: actions/download-artifact@v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: name: bundle @@ -355,7 +333,7 @@ jobs: rm -fr cloudnative-pg-catalog.yaml bundle.Dockerfile *.zip bundle/ - name: Create Remote Pull Request - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 with: token: ${{ secrets.REPO_GHA_PAT }} commit-message: "operator cloudnative-pg (${{ env.VERSION }})" @@ -410,11 +388,11 @@ jobs: github.repository_owner == 'cloudnative-pg' env: VERSION: ${{ needs.release-binaries.outputs.version }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout artifact - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: repository: cloudnative-pg/artifacts token: ${{ secrets.REPO_GHA_PAT }} @@ -427,7 +405,7 @@ jobs: git config user.name "${{ needs.release-binaries.outputs.author_name }}" - name: Download the bundle - uses: actions/download-artifact@v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: name: bundle - @@ -449,7 +427,7 @@ jobs: git commit -sm "${COMMIT_MESSAGE}" - name: Push commit - uses: ad-m/github-push-action@v0.8.0 + uses: ad-m/github-push-action@d91a481090679876dfc4178fef17f286781251df # v0.8.0 with: github_token: ${{ secrets.REPO_GHA_PAT }} repository: cloudnative-pg/artifacts diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml index b9fcc094af..b6f8fc32f8 100644 --- a/.github/workflows/release-tag.yml +++ b/.github/workflows/release-tag.yml @@ -11,17 +11,19 @@ on: paths: - 'pkg/versions/versions.go' +permissions: read-all + jobs: tag: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Create tag if: github.event.pull_request.merged == true && startsWith(github.head_ref, 'release/v') - uses: christophebedard/tag-version-commit@v1.7.0 + uses: christophebedard/tag-version-commit@57ffb155fc61c8ab098fcfa273469b532c1d4ce7 # v1.7.0 with: token: ${{ secrets.REPO_GHA_PAT }} version_regex: '^Version tag to ([0-9]+\.[0-9]+\.[0-9]+(?:-[a-z][0-9a-z]*)?)' diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index de18c2dcae..66638a0c73 
100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -10,17 +10,16 @@ on: - labeled - unlabeled -env: - REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} +permissions: read-all jobs: require-labels: name: Require labels - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Require labels - uses: docker://agilepathway/pull-request-label-checker:v1.6.55 + uses: agilepathway/label-checker@c3d16ad512e7cea5961df85ff2486bb774caf3c5 # v1.6.65 with: any_of: "ok to merge :ok_hand:" none_of: "do not merge" - repo_token: ${{ env.REPO_TOKEN }} + repo_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 81e3bba54f..e64635fdab 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -8,23 +8,41 @@ on: - main workflow_dispatch: +permissions: read-all + jobs: security: name: Security scan - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 + permissions: + security-events: write steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + + - name: Static Code Analysis + uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # 0.4.0 + continue-on-error: true + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + with: + command: 'code test' + args: --sarif-file-output=snyk-static.sarif + + - name: Upload result to GitHub Code Scanning + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 + with: + sarif_file: snyk-static.sarif - - name: Run Snyk to check for vulnerabilities - uses: snyk/actions/golang@0.4.0 + - name: Vulnerability scan + uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # 0.4.0 continue-on-error: true env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} with: - args: --sarif-file-output=snyk.sarif + args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 with: - sarif_file: snyk.sarif + sarif_file: snyk-test.sarif diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 8dd5b1403c..c27e21723d 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -3,18 +3,20 @@ on: push: workflow_dispatch: +permissions: read-all + jobs: # Check code for non-inclusive language woke: name: Run woke - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: woke - uses: get-woke/woke-action@v0 + uses: get-woke/woke-action@b2ec032c4a2c912142b38a6a453ad62017813ed0 # v0 with: # Cause the check to fail on any broke rules fail-on-error: true @@ -22,10 +24,10 @@ jobs: # Enforce en-us spell check spellcheck: name: Run spellcheck - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@0.42.0 + uses: rojopolis/spellcheck-github-actions@35a02bae020e6999c5c37fabaf447f2eb8822ca7 # 0.51.0 diff --git a/.github/workflows/sync-api.yml b/.github/workflows/sync-api.yml new file mode 100644 index 0000000000..84ec9a8d86 --- /dev/null +++ b/.github/workflows/sync-api.yml @@ -0,0 +1,19 @@ +name: Sync API + +on: + push: + branches: + - main + +permissions: read-all + +jobs: + trigger-sync: 
+ runs-on: ubuntu-latest + steps: + - name: Invoke repository dispatch + uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3 + with: + token: ${{ secrets.REPO_GHA_PAT }} + repository: cloudnative-pg/api + event-type: sync-api diff --git a/.golangci.yml b/.golangci.yml index 07c5b16eb7..896adb3cc2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,45 +1,25 @@ -linters-settings: - lll: - line-length: 120 - gci: - sections: - - standard - - default - - prefix(github.com/cloudnative-pg/cloudnative-pg) - - blank - - dot - gosec: - excludes: - - G101 # remove this exclude when https://github.com/securego/gosec/issues/1001 is fixed - +version: "2" linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true + default: none enable: - asciicheck - bodyclose + - copyloopvar - dogsled - dupl - durationcheck - errcheck - - copyloopvar - - gci + - ginkgolinter - gocognit - goconst - gocritic - gocyclo - - gofmt - - gofumpt - goheader - - goimports - gomoddirectives - gomodguard - goprintffuncname - gosec - - gosimple - govet - - ginkgolinter - importas - ineffassign - lll @@ -53,78 +33,69 @@ linters: - rowserrcheck - sqlclosecheck - staticcheck - - stylecheck - thelper - tparallel - - typecheck - unconvert - unparam - unused + - usestdlibvars - wastedassign - whitespace - - # to be checked: - # - errorlint - # - forbidigo - # - forcetypeassert - # - goerr113 - # - ifshort - # - nilerr - # - nlreturn - # - noctx - # - nolintlint - # - paralleltest - # - promlinter - # - tagliatelle - # - wrapcheck - - # don't enable: - # - cyclop - # - depguard - # - exhaustive - # - exhaustivestruct - # - funlen - # - gochecknoglobals - # - gochecknoinits - # - godot - # - godox - # - gomnd - # - testpackage - # - wsl - - # deprecated: - # - deadcode - # - golint - # - interfacer - # - maligned - # - scopelint - # - structcheck - # - varcheck - -run: - timeout: 5m - -issues: - exclude-rules: - # Allow dot imports for ginkgo and gomega - - source: ginkgo|gomega - linters: - - revive - text: "should not use dot imports" - # Exclude some linters from running on tests files. 
- - path: _test\.go - linters: - - goconst - # Exclude lll issues for lines with long annotations - - linters: - - lll - source: "//\\s*\\+" - # We have no control of this in zz_generated files and it looks like that excluding those files is not enough - # so we disable "ST1016: methods on the same type should have the same receiver name" in api directory - - linters: - - stylecheck - text: "ST1016:" - path: api/ - exclude-use-default: false - exclude-files: - - zz_generated.* + settings: + gosec: + excludes: + - G101 + staticcheck: + checks: + - all + - '-QF1001' + - '-QF1007' + lll: + line-length: 120 + exclusions: + generated: lax + rules: + - linters: + - revive + text: should not use dot imports + source: ginkgo|gomega + - linters: + - goconst + path: _test\.go + - linters: + - lll + source: //\s*\+ + - linters: + - staticcheck + path: api/ + text: 'ST1016:' + - linters: + - revive + path: /(utils|common)/[^/]+.go + text: avoid meaningless package names + paths: + - zz_generated.* + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + settings: + gci: + sections: + - standard + - default + - prefix(github.com/cloudnative-pg/cloudnative-pg) + - blank + - dot + exclusions: + generated: lax + paths: + - zz_generated.* + - third_party$ + - builtin$ + - examples$ diff --git a/.goreleaser.yml b/.goreleaser.yml index bfa792b814..1b6e40c5b8 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -33,6 +33,27 @@ builds: - amd64 - arm64 +- id: manager-race + binary: manager/manager_{{ .Arch }} + main: cmd/manager/main.go + no_unique_dist_dir: true + skip: >- + {{ if and (isEnvSet "RACE") (eq .Env.RACE "true") }}false{{ else }}true{{ end }} + gcflags: + - all=-trimpath={{.Env.GOPATH}};{{.Env.PWD}} + ldflags: + - -race + - -s + - -w + - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion={{.Env.VERSION}} + - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit={{.Env.COMMIT}} + - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate={{.Env.DATE}} + goos: + - linux + goarch: + - amd64 + - arm64 + - id: kubectl-cnpg binary: kubectl-cnpg main: cmd/kubectl-cnpg/main.go @@ -50,9 +71,7 @@ builds: - windows goarch: - amd64 - - 386 - arm64 - - arm - ppc64le - s390x goarm: @@ -60,8 +79,6 @@ builds: - 6 - 7 ignore: - - goos: darwin - goarch: 386 - goos: windows goarch: ppc64le - goos: windows @@ -72,9 +89,8 @@ archives: kubectl-cnpg_{{ .Version }}_ {{- .Os }}_ {{- if eq .Arch "amd64" }}x86_64 - {{- else if eq .Arch "386" }}i386 {{- else }}{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ end }} - builds: + ids: - kubectl-cnpg nfpms: @@ -83,12 +99,11 @@ nfpms: kubectl-cnpg_{{ .Version }}_ {{- .Os }}_ {{- if eq .Arch "amd64" }}x86_64 - {{- else if eq .Arch "386" }}i386 {{- else }}{{ .Arch }}{{ with .Arm }}v{{ . 
}}{{ end }}{{ end }} homepage: https://github.com/cloudnative-pg/cloudnative-pg bindir: /usr/local/bin maintainer: 'Marco Nenciarini ' - builds: + ids: - kubectl-cnpg formats: - rpm diff --git a/.spellcheck.yaml b/.spellcheck.yaml index 540285ffcd..d60d3bf957 100644 --- a/.spellcheck.yaml +++ b/.spellcheck.yaml @@ -3,6 +3,7 @@ matrix: sources: - 'docs/src/*.md' - 'docs/src/*/*.md' + - 'config/olm-manifests/bases/*.yaml' default_encoding: utf-8 aspell: lang: en @@ -25,4 +26,6 @@ matrix: close: '(?P=open)' - open: '(?P)' + - open: '.*base64data.*' + close: "$" - pyspelling.filters.url: diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 26de5d448c..c9b9647ac1 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -2,6 +2,7 @@ AES API's APIs ARMv +AUR AZ AZs AcolumnName @@ -9,6 +10,7 @@ AdditionalCommandArgs AdditionalPodAffinity AdditionalPodAntiAffinity AffinityConfiguration +AllNamespaces AntiAffinity AppArmor AppArmorProfile @@ -23,6 +25,7 @@ AzureCredentials AzurePVCUpdateEnabled Azurite BDR +BUSL BackupCapabilities BackupConfiguration BackupFrom @@ -48,6 +51,7 @@ BootstrapPgBaseBackup BootstrapRecovery Burstable ByStatus +CAs CIS CKA CN @@ -72,6 +76,7 @@ Ceph CertificatesConfiguration CertificatesStatus Certmanager +CiliumNetworkPolicy ClassName ClientCASecret ClientCertsCASecret @@ -118,7 +123,10 @@ DISA DNS DataBackupConfiguration DataBase +DataDurabilityLevel DataSource +DatabaseObjectSpec +DatabaseObjectStatus DatabaseReclaimPolicy DatabaseRoleRef DatabaseSpec @@ -139,6 +147,7 @@ EDB EKS EOF EOL +ESO EmbeddedObjectMetadata EnablePDB EncryptionType @@ -152,8 +161,18 @@ EnvVar EphemeralVolumeSource EphemeralVolumesSizeLimit EphemeralVolumesSizeLimitConfiguration +ExtensionConfiguration +ExtensionSpec +ExtensionStatus ExternalCluster +FDW +FDWSpec +FDWs FQDN +FQDNs +FailoverQuorum +FailoverQuorumSpec +FailoverQuorumStatus Fei Filesystem Fluentd @@ -169,6 +188,7 @@ GUC GUCs Gabriele GaugeVec +GeoSpatial Gi GitOps GoArch @@ -192,26 +212,36 @@ IfNotPresent ImageCatalog ImageCatalogRef ImageCatalogSpec +ImageInfo +ImageVolume +ImageVolumeSource ImportSource InfoSec Innocenti InstanceID InstanceReportedState +IsolationCheckConfiguration +Isovalent Istio Istio's JSON Jihyuk Jitendra +KV +Karpenter KinD Krew KubeCon Kubegres +KubernetesClusterDomain Kumar LDAP LDAPBindAsAuth LDAPBindSearchAuth LDAPConfig LDAPScheme +LF +LLC LPV LSN LTS @@ -242,6 +272,7 @@ Milsted MinIO Minikube MonitoringConfiguration +MultiNamespace NFS NGINX NOBYPASSRLS @@ -250,6 +281,7 @@ NOCREATEROLE NOSUPERUSER Namespaces Nenciarini +NetworkPolicy Niccolò NodeAffinity NodeMaintenanceWindow @@ -257,6 +289,7 @@ NodeSelector NodesUsed Noland O'Reilly +OCI OLAP OLTP OOM @@ -268,18 +301,24 @@ OngoingSnapshotBackups OnlineConfiguration OnlineUpdateEnabled OnlineUpgrading +OpenBao +OpenID OpenSSL OpenShift Openshift OperatorCapabilities OperatorGroup OperatorHub +OptionSpec +OptionSpecValue +OwnNamespace PDB PDBs PGAudit PGDATA PGDG PGData +PGDataImageInfo PGSQL PKI PODNAME @@ -300,7 +339,6 @@ PgBouncerSecrets PgBouncerSecretsVersions PgBouncerSpec Philippe -PluginConfigurationList PluginStatus PoLA PodAffinity @@ -337,11 +375,25 @@ PrimaryUpdateMethod PrimaryUpdateStrategy PriorityClass PriorityClassName +ProbeStrategyType +ProbeTerminationGracePeriod +ProbeWithStrategy +ProbesConfiguration ProjectedVolumeSource +Promotable +PublicationReclaimPolicy +PublicationSpec +PublicationStatus +PublicationTarget +PublicationTargetAllTables +PublicationTargetObject +PublicationTargetTable PullPolicy 
+PushSecret QoS Quaresima QuickStart +QuorumFailoverProtection RBAC README RHSA @@ -361,6 +413,8 @@ ReplicationTLSSecret ResizingPVC ResourceRequirements ResourceVersion +RestoreJobHook +RestoreJobHookCapabilities RetentionPolicy RoleBinding RoleConfiguration @@ -374,27 +428,34 @@ RuntimeDefault Ruocco SANs SAS +SBOM SCC SCCs SDK SELinux SHA SLA +SLSA +SPDX SPoF SQLQuery SQLRefs SSL SSZ STORAGEACCOUNTNAME +Scaleway ScheduledBackup ScheduledBackupList ScheduledBackupSpec ScheduledBackupStatus ScheduledBackups +SchemaSpec Scorsolini +Seccomp SeccompProfile SecretKeySelector SecretRefs +SecretStore SecretVersion SecretsResourceVersion SecurityProfiles @@ -413,24 +474,35 @@ ServiceUpdateStrategy SetStatusInCluster ShutdownCheckpointToken Silvela +SingleNamespace Slonik SnapshotOwnerReference SnapshotType Snapshotting Snyk Stackgres +StandbyNames +StandbyNumber +StartupProbe +StartupStrategyType StatefulSets StorageClass StorageConfiguration Storages +SubscriptionReclaimPolicy +SubscriptionSpec +SubscriptionStatus SuccessfullyExtracted +SuperUserSecret SwitchReplicaClusterStatus SyncReplicaElectionConstraints SynchronizeReplicas SynchronizeReplicasConfiguration SynchronousReplicaConfiguration SynchronousReplicaConfigurationMethod +SynchronousStandbyNamesList Synopsys +SystemID TCP TLS TLSv @@ -457,7 +529,9 @@ UTF Uncomment Unrealizable UpdateStrategy +UsageSpec VLDB +VLDBs VM VMs VOLNAME @@ -470,6 +544,7 @@ VolumeSnapshotConfiguration VolumeSnapshots WAL WAL's +WALArchiver WALBackupConfiguration WALCapabilities WALs @@ -481,6 +556,7 @@ YXBw YY YYYY Zalando +Zstandard abd accessKeyId accessModes @@ -492,14 +568,17 @@ addons affinityconfiguration aks albert +allTables allnamespaces alloc allocator allowConnections allowPrivilegeEscalation allowVolumeExpansion +alm amd angus +anonymization api apiGroup apiGroups @@ -508,6 +587,7 @@ apidoc apimachinery apis apiserver +apiservicedefinitions apparmor appdb applicationCredentials @@ -524,6 +604,7 @@ authQuery authQuerySecret authn authz +autocompletion autoscaler autovacuum availableArchitectures @@ -535,6 +616,7 @@ azurite ba backend backends +backoff backport backported backporting @@ -569,11 +651,13 @@ bindPassword bindSearchAuth bitmask bool +booleanSwitch bootstrapconfiguration bootstrapinitdb bootstraprecovery br bs +builtinLocale bw byStatus bypassrls @@ -609,13 +693,16 @@ clusterName clusterimagecatalogs clusterlist clusterrole +clusterserviceversions clusterspec clusterstatus cmd cn cnp cnpg +codebase codeready +collationVersion columnValue commandError commandOutput @@ -630,11 +717,14 @@ configmapkeyselector configmaps configs configurability +congruency conn connectionLimit connectionParameters connectionString +connectionTimeout conninfo +containerImage containerPort controldata coredump @@ -647,6 +737,7 @@ cpu crc crds crdview +createdAt createdb createrole createuser @@ -655,6 +746,7 @@ creds cron crt cryptographic +cryptographically csvlog csvs ctl @@ -666,11 +758,13 @@ currentPrimaryTimestamp customQueriesConfigMap customQueriesSecret customizable +customresourcedefinitions cutover cyber dT danglingPVC dataChecksums +dataDurability databackupconfiguration databaseReclaimPolicy datacenter @@ -697,6 +791,9 @@ dir disableDefaultQueries disablePassword disabledDefaultServices +discoverable +displayName +displayName distro distroless distros @@ -710,6 +807,7 @@ downtimes dvcmQ dwm dx +eBPF ecdsa edb eks @@ -735,17 +833,22 @@ executables expirations extensibility externalCluster +externalClusterName externalClusterSecretVersion 
externalClusters externalclusters facto failover failoverDelay +failoverquorums failovers +failureThreshold faq fastpath fb fd +fdw +fdws ffd fieldPath fieldref @@ -753,14 +856,18 @@ filesystem finalizer findstr fio +fips firstRecoverabilityPoint firstRecoverabilityPointByMethod +fqdn freddie fuzzystrmatch +gRPC gapped gc gcc gce +gcp gcs gcsCredentials geocoder @@ -795,11 +902,15 @@ http httpGet https hugepages +icu +icuLocale +icuRules ident imageCatalogRef imageName imagePullPolicy imagePullSecrets +imageVolume imagecatalogs img immediateCheckpoint @@ -813,8 +924,11 @@ inheritedMetadata init initDB initdb -initialise +initialDelaySeconds initializingPVC +inplace +installModes +installplans instanceID instanceName instanceNames @@ -828,6 +942,8 @@ ipcs ips isPrimary isTemplate +isWALArchiver +isolationCheck issuecomment italy jdbc @@ -837,12 +953,14 @@ json jsonpath kb kbytes +keepalive kms kube kubebuilder kubectl kubelet kubernetes +kubernetesClusterDomain labelColumnName labelColumnValue labelName @@ -859,6 +977,7 @@ lastSuccessfulBackupByMethod latestGeneratedNode latn lc +ld ldap ldapBindPassword ldaps @@ -880,6 +999,7 @@ livenessProbeTimeout lm localeCType localeCollate +localeProvider localhost localobjectreference locktype @@ -887,7 +1007,11 @@ logLevel lookups lsn lt +lz +mTLS macOS +majorVersion +majorVersionUpgradeFromImage malcolm mallocs managedRoleSecretVersion @@ -899,9 +1023,11 @@ maxClientConnections maxParallel maxStandbyNamesFromCluster maxSyncReplicas +maximumLag maxwait mcache md +mediatype mem memstats metav @@ -911,15 +1037,19 @@ microservice microservices microsoft minApplyDelay +minKubeVersion minSyncReplicas minikube minio +misconfigurations mmap monitoringconfiguration mountPath msg mspan multinamespace +mutatingwebhookconfigurations +mutex myAKSCluster myResourceGroup namespace @@ -970,22 +1100,29 @@ operatorgroups operatorhub osdk ou +overridable ownerMetadata ownerReference packagemanifests parseable +paru passfile passwd passwordSecret passwordStatus pc pdf +periodSeconds persistentvolumeclaim persistentvolumeclaims pgAdmin pgBouncer pgBouncerIntegration pgBouncerSecrets +pgDataImageInfo +pgDumpExtraOptions +pgRestoreExtraOptions +pgRouting pgSQL pgadmin pgaudit @@ -998,19 +1135,23 @@ pgpass pgstatstatements phaseReason pid +pinger pitr plpgsql pluggable pluginConfiguration +pluginMetadata pluginStatus png podAffinityTerm podAntiAffinity podAntiAffinityType +podCount podMetricsEndpoints podMonitorMetricRelabelings podMonitorRelabelings podName +podStatuses podmonitor podtemplates poolMode @@ -1048,10 +1189,14 @@ programmatically proj projectedVolumeTemplate prometheus +promotable promotionTimeout promotionToken provisioner psql +publicationDBName +publicationName +publicationReclaimPolicy pv pvc pvcCount @@ -1060,6 +1205,7 @@ pvcTemplate quantile queryable quickstart +quorumFailoverProtection rbac rc readService @@ -1068,6 +1214,7 @@ readthedocs readyInstances reconciler reconciliationLoop +reconnection recoverability recoveredCluster recoveryTarget @@ -1087,20 +1234,27 @@ repmgr reportNonRedacted reportRedacted req +requestTimeout requiredDuringSchedulingIgnoredDuringExecution resizeInUseVolumes resizingPVC +resourceRequirements resourceVersion resourcerequirements restoreAdditionalCommandArgs +restoreJobHookCapabilities resync retentionPolicy +retryable reusePVC ro robfig roleRef rollingupdatestatus rollout +rollouts +rpo +rto runonserver runtime rw @@ -1109,6 +1263,7 @@ sa sas scalability scalable +scaleway sccs scheduledbackup scheduledbackuplist @@ 
-1117,6 +1272,7 @@ scheduledbackupspec scheduledbackupstatus schedulerName schemaOnly +schemas sdk searchAttribute searchFilter @@ -1151,7 +1307,9 @@ shmmax shutdownCheckpointToken sig sigs +sigstore singlenamespace +skipRange slotPrefix smartShutdownTimeout snapshotBackupStatus @@ -1159,6 +1317,7 @@ snapshotOwnerReference snapshotted snapshotting sourceNamespace +specDescriptors specificities sql src @@ -1173,11 +1332,14 @@ sslkey sslmode sslrootcert sso +standbyNames standbyNamesPost standbyNamesPre +standbyNumber startDelay startedAt stateful +statusDescriptors stderr stdout stedolan @@ -1196,24 +1358,31 @@ subcommand subcommands subdirectory subresource +subscriptionReclaimPolicy substatement +successThreshold successfullyExtracted sudo superuserSecret superuserSecretVersion sv svc +svg switchReplicaClusterStatus switchoverDelay switchovers syncReplicaElectionConstraint +synchronizeLogicalDecoding synchronizeReplicas synchronizeReplicasCache sys syslog +systemID systemd sysv tAc +tableExpression +tablesInSchema tablespace tablespaceClassName tablespaceMapFile @@ -1236,11 +1405,13 @@ tbody tcp td temporaryData +terminationGracePeriodSeconds th thead timeLineID timeframes timelineID +timeoutSeconds tls tmp tmpfs @@ -1248,12 +1419,15 @@ tolerations topologies topologyKey topologySpreadConstraints +toto transactionID transactional transactionid tx ubi +ui uid +uint ul un uncordon @@ -1270,11 +1444,14 @@ updateStrategy upgradable uptime uri +url usename usernamepassword usr utils validUntil +validatingwebhookconfigurations +validator valueFrom viceversa virtualized @@ -1305,5 +1482,8 @@ wsl www xact xlog +xml +xz yaml yml +zstd diff --git a/ADOPTERS.md b/ADOPTERS.md index a732916833..17e77c2276 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -1,24 +1,29 @@ # Adopters -Below you can find a list of organizations and users who have agreed to -tell the world that they are using PostgreSQL in Kubernetes with our -CloudNativePG operator in a production environment. +Below is a list of organizations and users who have publicly shared that +they’re using PostgreSQL in Kubernetes with the CloudNativePG operator in a +production environment. -The goal of this list is to inspire others to do the same and to grow -this open source community and project. +The purpose of this list is to inspire others to join the movement and help +grow our open-source community and project. -Please add your organization to this list. It takes 5 minutes of your time, -but it means a lot to us. +Adding your organization takes just 5 minutes of your time, but it means a lot +to us! -## Updating this list +## How to Add Your Organization -To add your organization to this list, you can either: +You can add your organization to this list in two ways: -- [open a pull request](https://github.com/cloudnative-pg/cloudnative-pg/pulls) to directly update this file, or -- [edit this file](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/ADOPTERS.md) directly in GitHub +- [Open a pull request](https://github.com/cloudnative-pg/cloudnative-pg/pulls) + to directly update this file. +- [Edit the file](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/ADOPTERS.md) + directly on GitHub. -Feel free to ask in the Slack chat if you any questions and/or require -assistance with updating this list. +Use the commit title: **"docs: add to `ADOPTERS.md`"** and +be sure to [sign off your work](contribute/README.md#sign-your-work). + +If you need any assistance, feel free to ask in our Slack chat—we’re here to +help! 
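For reference, a minimal sketch of the submission flow described above; the branch name is hypothetical, and `git commit -s` is what adds the `Signed-off-by` trailer required by the sign-off step:

```sh
# Hypothetical branch name, for illustration only
git checkout -b docs/add-adopter
# Edit ADOPTERS.md and add your row to the adopters table, then:
git add ADOPTERS.md
git commit -s -m "docs: add to ADOPTERS.md"  # -s appends the Signed-off-by trailer
git push origin docs/add-adopter             # then open a pull request on GitHub
```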
## CloudNativePG Adopters @@ -46,4 +51,20 @@ This list is sorted in chronological order, based on the submission date. | [Microsoft Azure](https://azure.microsoft.com/en-us/) | @KenKilty | 2024-08-22 | Learn how to [deploy](https://learn.microsoft.com/azure/aks/postgresql-ha-overview) PostgreSQL on [Azure Kubernetes Services (AKS)](https://learn.microsoft.com/azure/aks/what-is-aks) with [EDB commercial support](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/enterprisedb-corp.edb-enterprise) and [EDB Postgres-as-a-Service](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/enterprisedb-corp.biganimal-prod-v1) offerings available in the [Azure Marketplace](https://azuremarketplace.microsoft.com/).| | [PZU Group](https://www.pzu.pl) | @MichaluxPL | 2024-08-26 | PZU is one of the largest financial institutions in Poland and also the largest insurance company in Central and Eastern Europe. CloudNativePG is used as on-premise cloud solution/DBaaS to provide highly available PostgreSQL clusters.| | [Telnyx](https://www.telnyx.com) | @aryklein | 2024-09-24 | Telnyx leverages PostgreSQL as its relational database for internal services, managing databases with high availability using CloudNativePG across multiple Kubernetes clusters in different sites, with distributed replica clusters to ensure data redundancy and resilience. | - +| [Alpcot](https://alpcot.se) | @svenakela | 2024-09-24 | Alpcot uses CloudNativePG for both public-facing and internal applications deployed in the cloud and in-house Kubernetes. | +| [GEICO Tech](https://www.geico.com/tech/) | @ardentperf | 2024-09-24 | GEICO Tech is building the most consumer-centric insurance offerings in America. CloudNativePG is used to provide a highly available Kubernetes-based Postgres service, both in the cloud and on-premises. | +| [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. | +| [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. | +| [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. | +| [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App Platform](https://github.com/linode/apl-core) for all platform managed PostgreSQL databases. | +| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-20 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. | +| [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. | +| [Obmondo](https://obmondo.com) | @Obmondo | 2024-11-25 | At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called [KubeAid](https://kubeaid.io/) to easily manage all PostgreSQL databases across clusters from a centralized interface. | +| [Mirakl](https://www.mirakl.com/) | @ThomasBoussekey | 2025-02-03 | CloudNativePG is our default hosting solution for marketplace instances.
With over 300 CloudNativePG clusters managing 8 TB of data, we have developed highly customizable Helm charts that support connection pooling, logical replication, and many other advanced features. | +| [Bitnami](https://bitnami.com) | [@carrodher](https://github.com/carrodher) | 2025-03-04 | Bitnami provides CloudNativePG as part of its open-source [Helm charts catalog](https://github.com/bitnami/charts), enabling users to easily deploy PostgreSQL clusters on Kubernetes. Additionally, CloudNativePG is available through [Tanzu Application Catalog](https://www.vmware.com/products/app-platform/tanzu-application-catalog) and [Bitnami Premium](https://www.arrow.com/globalecs/na/vendors/bitnami-premium/), where customers can benefit from advanced security and compliance features such as VEX, SBOM, SLSA3, and CVE scanning. | +| [Giant Swarm](https://www.giantswarm.io/) | [@stone-z](https://github.com/stone-z) | 2025-05-02 | Giant Swarm's full-service Kubernetes security and observability platforms are powered by PostgreSQL clusters delightfully managed with CloudNativePG. | +| [DocumentDB Operator](https://github.com/microsoft/documentdb-kubernetes-operator) | [@xgerman](https://github.com/xgerman) | 2025-05-22 | The DocumentDB Kubernetes Operator is an open-source project to run and manage DocumentDB on Kubernetes. [DocumentDB](https://github.com/microsoft/documentdb) is the engine powering vCore-based [Azure Cosmos DB for MongoDB](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/). The operator uses CloudNativePG behind the scenes. | +| [Xata](https://xata.io) | [@tsg](https://github.com/tsg) | 2025-05-29 | Xata is a PostgreSQL platform offering instant database branching, separation of storage/compute, and PII anonymization. It uses CloudNativePG for the compute part. | +| [Vera Rubin Observatory](https://www.lsst.org) | [@cbarria](https://github.com/cbarria) | 2025-06-17 | At the heart of our operations, CloudNativePG supports the telescope's systems and plays a key role in making astronomical data openly accessible to the world. | +| [Brella](https://www.brella.io) | [@vitobotta](https://github.com/vitobotta/) | 2025-08-11 | Brella is an event management platform that works in new and smart ways. Postgres is at the core of how our platform is built. With CloudNativePG, we moved from using a managed Postgres service - Cloud SQL on Google Cloud - to running Postgres clusters directly in Kubernetes. This change saves us money and gives us more control. At the same time, we didn't lose any functionality.| +| [Linux Polska](https://linuxpolska.com) | [@maaciekk](https://github.com/maaciekk) | 2025-08-11 | CloudNativePG is our gold standard for providing highly available databases in a Kubernetes environment, powering mission-critical applications across various industries like healthcare and finance. Independent rebuilds of CloudNativePG images are also part of our SourceMation stack. | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c3a18b3d1d..94bafbd5c2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,23 +1,21 @@ # Contributing to CloudNativePG -Welcome! We are glad that you want to contribute to our CloudNativePG project! 💖 +Welcome! We are glad that you want to contribute to the CloudNativePG project! 
💖 -As you get started, you are in the best position to give us feedbacks on areas of -our project that we need help with, including: +To get started, here are some areas the project could really use some help with: * Problems found while setting up the development environment * Gaps in our documentation -* Bugs in our Github actions +* Bugs in our GitHub actions * Promotion of PostgreSQL on Kubernetes with our operator -First, though, it is important that you read the [code of -conduct](CODE_OF_CONDUCT.md). +First, though, it is important that you read the +[code of conduct](CODE_OF_CONDUCT.md). The guidelines below are a starting point. We don't want to limit your -creativity, passion, and initiative. If you think there's a better way, please -feel free to bring it up in a Github discussion, or open a pull request. We're -certain there are always better ways to do things, we just need to start some -constructive dialogue! +creativity, passion, and initiative. If you think there are other things +you can contribute, please feel free to bring them up in a GitHub Issue, +or open a Pull Request! ## Ways to contribute @@ -28,29 +26,36 @@ We welcome many types of contributions including: * Bug fixes * [Documentation](docs/README.md) * Issue Triage -* Answering questions on Slack or Github Discussions -* Web design +* Answering questions on [Slack](README.md#communications) or GitHub Discussions +* The [website](https://github.com/cloudnative-pg/cloudnative-pg.github.io) * Communications / Social Media / Blog Posts -* Events participation +* Advocacy at Events (let us know when your talk about CloudNativePG is accepted!) * Release management For development contributions, please refer to the separate section called ["Contributing to the source code"](contribute/README.md). +## External Contributors vs Maintainers + +**External Contributors:** If you're contributing from outside the core team, please note that some instructions in our detailed development docs apply only to maintainers. See the [development contribution guide](contribute/README.md) for complete details, but note: + +- **Issue Assignment**: Comment "I'd like to work on this" instead of self-assigning +- **Testing**: Run local unit tests and basic e2e tests (see [testing guide](contribute/e2e_testing_environment/README.md) and the sketch below); maintainers will handle comprehensive cloud-based E2E testing +- **Project Boards**: Maintainers will move tickets through project phases + ## Ask for Help The best way to reach us with a question when contributing is to drop a line in -our [Slack channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g), or -start a new Github discussion. +our [Slack channel](README.md#communications), or start a new GitHub discussion. ## Raising Issues -When raising issues, please specify the following: +When raising [Issues](https://github.com/cloudnative-pg/cloudnative-pg/issues), please specify the following: -- Setup details as specified in the issue template -- A scenario where the issue occurred (with details on how to reproduce it) -- Errors and log messages that are displayed by the involved software -- Any other detail that might be useful +* Setup details as specified in the Issue template +* A scenario where the issue occurred (with details on how to reproduce it) +* Errors and log messages that are displayed by the involved software +* Any other detail that might be useful If you are trying to report a vulnerability, please refer to the [security policy](SECURITY.md).
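A rough sketch of the local checks mentioned in the testing note above, using Makefile targets that appear later in this diff; the prerequisites (a Go toolchain, golangci-lint, Docker, and kind) are assumed to be installed:

```sh
# Lint and unit-test the operator code (targets defined in this repo's Makefile)
make lint           # runs golangci-lint
make test           # unit tests against an envtest-provisioned control plane
# Basic end-to-end suite on a local kind cluster
make e2e-test-kind
```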
diff --git a/Dockerfile b/Dockerfile index c96d232364..2aa61ae76a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,34 +1,17 @@ +ARG BASE=gcr.io/distroless/static-debian12:nonroot@sha256:a9f88e0d99c1ceedbce565fad7d3f96744d15e6919c19c7dafe84a6dd9a80c61 + # This builder stage it's only because we need a command -# to create a symlink and reduce the size of the image -FROM gcr.io/distroless/static-debian12:debug-nonroot AS builder +# to create a symlink and we do not have it in a distroless image +FROM gcr.io/distroless/static-debian12:debug-nonroot@sha256:a855ba843839f3344272cb64183489d91c190af11bec454e5d17f341255944e1 AS builder ARG TARGETARCH - SHELL ["/busybox/sh", "-c"] -COPY --chown=nonroot:nonroot --chmod=0755 dist/manager/* bin/ -RUN ln -sf bin/manager_${TARGETARCH} manager - -FROM gcr.io/distroless/static-debian12:nonroot -ARG VERSION="dev" -ARG TARGETARCH - -ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." - -LABEL summary="$SUMMARY" \ - description="$DESCRIPTION" \ - io.k8s.display-name="$SUMMARY" \ - io.k8s.description="$DESCRIPTION" \ - name="CloudNativePG Operator" \ - vendor="CloudNativePG Contributors" \ - url="https://cloudnative-pg.io/" \ - version="$VERSION" \ - release="1" +RUN ln -sf operator/manager_${TARGETARCH} manager +FROM ${BASE} WORKDIR / - -# Needs to copy the entire content, otherwise, it will not -# copy the symlink properly. +COPY --chown=nonroot:nonroot --chmod=0755 dist/manager/* operator/ COPY --from=builder /home/nonroot/ . +COPY licenses /licenses +COPY LICENSE /licenses USER 65532:65532 - ENTRYPOINT ["/manager"] diff --git a/Dockerfile-ubi8 b/Dockerfile-ubi8 deleted file mode 100644 index 4c2712de72..0000000000 --- a/Dockerfile-ubi8 +++ /dev/null @@ -1,27 +0,0 @@ -FROM registry.access.redhat.com/ubi8/ubi-micro -ARG VERSION="dev" -ARG TARGETARCH - -ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." - -LABEL summary="$SUMMARY" \ - description="$DESCRIPTION" \ - io.k8s.display-name="$SUMMARY" \ - io.k8s.description="$DESCRIPTION" \ - name="CloudNativePG Operator" \ - vendor="CloudNativePG Contributors" \ - url="https://cloudnative-pg.io/" \ - version="$VERSION" \ - release="1" - -COPY licenses /licenses -COPY LICENSE /licenses - -WORKDIR / - -COPY dist/manager/* /bin/ -RUN ln -sf /bin/manager_${TARGETARCH} manager -USER 65532:65532 - -ENTRYPOINT ["/manager"] diff --git a/Dockerfile-ubi9 b/Dockerfile-ubi9 deleted file mode 100644 index 74409e03ca..0000000000 --- a/Dockerfile-ubi9 +++ /dev/null @@ -1,27 +0,0 @@ -FROM registry.access.redhat.com/ubi9/ubi-micro -ARG VERSION="dev" -ARG TARGETARCH - -ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." 
- -LABEL summary="$SUMMARY" \ - description="$DESCRIPTION" \ - io.k8s.display-name="$SUMMARY" \ - io.k8s.description="$DESCRIPTION" \ - name="CloudNativePG Operator" \ - vendor="CloudNativePG Contributors" \ - url="https://cloudnative-pg.io/" \ - version="$VERSION" \ - release="1" - -COPY licenses /licenses -COPY LICENSE /licenses - -WORKDIR / - -COPY dist/manager/* /bin/ -RUN ln -sf /bin/manager_${TARGETARCH} manager -USER 65532:65532 - -ENTRYPOINT ["/manager"] diff --git a/Makefile b/Makefile index 25c7e19161..3839e8b88a 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ # -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# # Image URL to use all building/pushing image targets IMAGE_NAME ?= ghcr.io/cloudnative-pg/cloudnative-pg-testing @@ -25,13 +28,20 @@ ifneq (,${IMAGE_TAG}) CONTROLLER_IMG = ${IMAGE_NAME}:${IMAGE_TAG} endif endif -CATALOG_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:catalog-/') -BUNDLE_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:bundle-/') +CATALOG_IMG ?= ${CONTROLLER_IMG}-catalog +BUNDLE_IMG ?= ${CONTROLLER_IMG}-bundle +INDEX_IMG ?= ${CONTROLLER_IMG}-index + +# Define CONTROLLER_IMG_WITH_DIGEST by appending CONTROLLER_IMG_DIGEST to CONTROLLER_IMG with '@' if CONTROLLER_IMG_DIGEST is set +ifneq ($(CONTROLLER_IMG_DIGEST),) +CONTROLLER_IMG_WITH_DIGEST := $(CONTROLLER_IMG)@$(CONTROLLER_IMG_DIGEST) +else +CONTROLLER_IMG_WITH_DIGEST := $(CONTROLLER_IMG) +endif COMMIT := $(shell git rev-parse --short HEAD || echo unknown) DATE := $(shell git log -1 --pretty=format:'%ad' --date short) VERSION := $(shell git describe --tags --match 'v*' | sed -e 's/^v//; s/-g[0-9a-f]\+$$//; s/-\([0-9]\+\)$$/-dev\1/') -REPLACE_VERSION := $(shell git describe --tags --abbrev=0 $(shell git describe --tags --match 'v*' --abbrev=0)^) LDFLAGS= "-X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion=${VERSION} $\ -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit=${COMMIT} $\ -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate=${DATE}" @@ -41,15 +51,24 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") -KUSTOMIZE_VERSION ?= v5.4.3 -CONTROLLER_TOOLS_VERSION ?= v0.16.3 -GORELEASER_VERSION ?= v2.2.0 -SPELLCHECK_VERSION ?= 0.42.0 +# renovate: datasource=github-releases depName=kubernetes-sigs/kustomize versioning=loose +KUSTOMIZE_VERSION ?= v5.6.0 +# renovate: datasource=go depName=sigs.k8s.io/controller-tools +CONTROLLER_TOOLS_VERSION ?= v0.19.0 +GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca +# renovate: datasource=go depName=github.com/goreleaser/goreleaser +GORELEASER_VERSION ?= v2.12.0 +# renovate: datasource=docker depName=jonasbn/github-action-spellcheck versioning=docker +SPELLCHECK_VERSION ?= 0.51.0 +# renovate: datasource=docker depName=getwoke/woke versioning=docker WOKE_VERSION ?= 0.19.0 -OPERATOR_SDK_VERSION ?= v1.37.0 -OPM_VERSION ?= v1.47.0 -PREFLIGHT_VERSION ?= 1.10.0 -OPENSHIFT_VERSIONS ?= v4.12-v4.17 +# renovate: datasource=github-releases depName=operator-framework/operator-sdk versioning=loose +OPERATOR_SDK_VERSION
?= v1.41.1 +# renovate: datasource=github-tags depName=operator-framework/operator-registry +OPM_VERSION ?= v1.57.0 +# renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight +PREFLIGHT_VERSION ?= 1.14.1 +OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 export CONTROLLER_IMG @@ -101,14 +120,17 @@ test: generate fmt vet manifests envtest ## Run tests. source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\ export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT=60s ;\ export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=60s ;\ - go test -coverpkg=./... --count=1 -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils ; + go test -coverpkg=./... -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils/... + +test-race: generate fmt vet manifests envtest ## Run tests enabling race detection. + mkdir -p ${ENVTEST_ASSETS_DIR} ;\ + source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\ + go run github.com/onsi/ginkgo/v2/ginkgo -r -p --skip-package=e2e \ + --race --keep-going --fail-on-empty --randomize-all --randomize-suites e2e-test-kind: ## Run e2e tests locally using kind. hack/e2e/run-e2e-kind.sh -e2e-test-k3d: ## Run e2e tests locally using k3d. - hack/e2e/run-e2e-k3d.sh - e2e-test-local: ## Run e2e tests locally using the default kubernetes context. hack/e2e/run-e2e-local.sh @@ -121,16 +143,29 @@ build-manager: generate fmt vet ## Build manager binary. build-plugin: generate fmt vet ## Build plugin binary. go build -o bin/kubectl-cnpg -ldflags ${LDFLAGS} ./cmd/kubectl-cnpg +build-race: generate fmt vet build-manager-race build-plugin-race ## Build the binaries adding the -race option. + +build-manager-race: generate fmt vet ## Build manager binary with -race option. + go build -race -o bin/manager -ldflags ${LDFLAGS} ./cmd/manager + +build-plugin-race: generate fmt vet ## Build plugin binary. + go build -race -o bin/kubectl-cnpg -ldflags ${LDFLAGS} ./cmd/kubectl-cnpg + + run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config. go run ./cmd/manager docker-build: go-releaser ## Build the docker image. GOOS=linux GOARCH=${ARCH} GOPATH=$(go env GOPATH) DATE=${DATE} COMMIT=${COMMIT} VERSION=${VERSION} \ - $(GO_RELEASER) build --skip=validate --clean --single-target $(if $(VERSION),,--snapshot) - DOCKER_BUILDKIT=1 docker build . -t ${CONTROLLER_IMG} --build-arg VERSION=${VERSION} - -docker-push: ## Push the docker image. 
- docker push ${CONTROLLER_IMG} + $(GO_RELEASER) build --skip=validate --clean --single-target $(if $(VERSION),,--snapshot); \ + builder_name_option=""; \ + if [ -n "${BUILDER_NAME}" ]; then \ + builder_name_option="--builder ${BUILDER_NAME}"; \ + fi; \ + DOCKER_BUILDKIT=1 buildVersion=${VERSION} revision=${COMMIT} \ + docker buildx bake $${builder_name_option} --set=*.platform="linux/${ARCH}" \ + --set distroless.tags="$${CONTROLLER_IMG}" \ + --push distroless olm-bundle: manifests kustomize operator-sdk ## Build the bundle for OLM installation set -xeEuo pipefail ;\ @@ -144,7 +179,6 @@ olm-bundle: manifests kustomize operator-sdk ## Build the bundle for OLM install rm -fr bundle bundle.Dockerfile ;\ sed -i -e "s/ClusterRole/Role/" "$${CONFIG_TMP_DIR}/config/rbac/role.yaml" "$${CONFIG_TMP_DIR}/config/rbac/role_binding.yaml" ;\ ($(KUSTOMIZE) build "$${CONFIG_TMP_DIR}/config/olm-manifests") | \ - sed -e "s@\$${VERSION}@${VERSION}@g; s@\$${REPLACE_VERSION}@${REPLACE_VERSION}@g" | \ $(OPERATOR_SDK) generate bundle --verbose --overwrite --manifests --metadata --package cloudnative-pg --channels stable-v1 --use-image-digests --default-channel stable-v1 --version "${VERSION}" ; \ echo -e "\n # OpenShift annotations." >> bundle/metadata/annotations.yaml ;\ echo -e " com.redhat.openshift.versions: $(OPENSHIFT_VERSIONS)" >> bundle/metadata/annotations.yaml ;\ @@ -164,6 +198,8 @@ olm-catalog: olm-bundle opm ## Build and push the index image for OLM Catalog - Image: ${BUNDLE_IMG}" | envsubst > cloudnative-pg-operator-template.yaml $(OPM) alpha render-template semver -o yaml < cloudnative-pg-operator-template.yaml > catalog/catalog.yaml ;\ $(OPM) validate catalog/ ;\ + $(OPM) index add --mode semver --container-tool docker --bundles "${BUNDLE_IMG}" --tag "${INDEX_IMG}" ;\ + docker push ${INDEX_IMG} ;\ DOCKER_BUILDKIT=1 docker build --push -f catalog.Dockerfile -t ${CATALOG_IMG} . ;\ echo -e "apiVersion: operators.coreos.com/v1alpha1\n\ kind: CatalogSource\n\ @@ -194,7 +230,7 @@ generate-manifest: manifests kustomize ## Generate manifest used for deployment. cd $$CONFIG_TMP_DIR/default ;\ $(KUSTOMIZE) edit add patch --path manager_image_pull_secret.yaml ;\ cd $$CONFIG_TMP_DIR/manager ;\ - $(KUSTOMIZE) edit set image controller="${CONTROLLER_IMG}" ;\ + $(KUSTOMIZE) edit set image controller="${CONTROLLER_IMG_WITH_DIGEST}" ;\ $(KUSTOMIZE) edit add patch --path env_override.yaml ;\ $(KUSTOMIZE) edit add configmap controller-manager-env \ --from-literal="POSTGRES_IMAGE_NAME=${POSTGRES_IMAGE_NAME}" ;\ @@ -209,10 +245,6 @@ manifests: controller-gen ## Generate manifests e.g. CRD, RBAC etc. generate: controller-gen ## Generate code. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." -deploy-locally: kind-cluster ## Build and deploy operator in local cluster - set -e ;\ - hack/setup-cluster.sh -n1 -r load deploy - olm-scorecard: operator-sdk ## Run the Scorecard test from operator-sdk $(OPERATOR_SDK) scorecard ${BUNDLE_IMG} --wait-time 60s --verbose @@ -227,6 +259,9 @@ vet: ## Run go vet against code. lint: ## Run the linter. golangci-lint run +lint-fix: ## Run the linter with --fix. + golangci-lint run --fix + shellcheck: ## Shellcheck for the hack directory. @{ \ set -e ;\ @@ -303,7 +338,7 @@ $(ENVTEST): $(LOCALBIN) GENREF = $(LOCALBIN)/genref genref: ## Download kubernetes-sigs/reference-docs/genref locally if necessary. 
- $(call go-install-tool,$(GENREF),github.com/kubernetes-sigs/reference-docs/genref@master) # wokeignore:rule=master + $(call go-install-tool,$(GENREF),github.com/kubernetes-sigs/reference-docs/genref@$(GENREF_VERSION)) GO_LICENSES = $(LOCALBIN)/go-licenses go-licenses: ## Download go-licenses locally if necessary. @@ -328,14 +363,6 @@ GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\ } endef -kind-cluster: ## Create KinD cluster to run operator locally - set -e ;\ - hack/setup-cluster.sh -n1 -r create - -kind-cluster-destroy: ## Destroy KinD cluster created using kind-cluster command - set -e ;\ - hack/setup-cluster.sh -n1 -r destroy - .PHONY: operator-sdk OPERATOR_SDK = $(LOCALBIN)/operator-sdk operator-sdk: ## Install the operator-sdk app diff --git a/PROJECT b/PROJECT index 59c5113ca1..9f0b527b4d 100644 --- a/PROJECT +++ b/PROJECT @@ -66,3 +66,30 @@ resources: kind: Database path: github.com/cloudnative-pg/cloudnative-pg/api/v1 version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cnpg.io + group: postgresql + kind: Publication + path: github.com/cloudnative-pg/cloudnative-pg/api/v1 + version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cnpg.io + group: postgresql + kind: Subscription + path: github.com/cloudnative-pg/cloudnative-pg/api/v1 + version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cnpg.io + group: postgresql + kind: FailoverQuorum + path: github.com/cloudnative-pg/cloudnative-pg/api/v1 + version: v1 diff --git a/README.md b/README.md index 50fd765c9b..1a8c4bec33 100644 --- a/README.md +++ b/README.md @@ -1,120 +1,102 @@ -[![CNCF Landscape](https://img.shields.io/badge/CNCF%20Landscape-5699C6)](https://landscape.cncf.io/?item=app-definition-and-development--database--cloudnativepg) +[![CNCF Landscape](https://img.shields.io/badge/CNCF%20Landscape-5699C6)][cncf-landscape] [![Latest Release](https://img.shields.io/github/v/release/cloudnative-pg/cloudnative-pg.svg)][latest-release] [![GitHub License](https://img.shields.io/github/license/cloudnative-pg/cloudnative-pg)][license] +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9933/badge)][openssf] +[![OpenSSF Scorecard Badge][openssf-scorecard-badge]][openssf-socrecard-view] [![Documentation][documentation-badge]][documentation] [![Stack Overflow](https://img.shields.io/badge/stackoverflow-cloudnative--pg-blue?logo=stackoverflow&logoColor=%23F48024&link=https%3A%2F%2Fstackoverflow.com%2Fquestions%2Ftagged%2Fcloudnative-pg)][stackoverflow] +[![FOSSA Status][fossa-badge]][fossa] -# Welcome to the CloudNativePG project! +# Welcome to the CloudNativePG Project! -**CloudNativePG** is a comprehensive open source platform designed to -seamlessly manage [PostgreSQL](https://www.postgresql.org/) databases within -Kubernetes environments, covering the entire operational lifecycle from initial -deployment to ongoing maintenance. The main component is the CloudNativePG -operator. +**CloudNativePG (CNPG)** is an open-source platform designed to seamlessly +manage [PostgreSQL](https://www.postgresql.org/) databases in Kubernetes +environments. It covers the entire operational lifecycle—from deployment to +ongoing maintenance—through its core component, the CloudNativePG operator. -CloudNativePG was originally built and sponsored by [EDB](https://www.enterprisedb.com). 
+## Table of Contents -## Table of content - -- [Code of conduct](CODE_OF_CONDUCT.md) -- [Governance policies](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md) +- [Code of Conduct](CODE_OF_CONDUCT.md) +- [Governance Policies](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md) - [Contributing](CONTRIBUTING.md) - [Adopters](ADOPTERS.md) +- [Commercial Support](https://cloudnative-pg.io/support/) - [License](LICENSE) ## Getting Started -The best way to get started is with the ["Quickstart"](docs/src/quickstart.md) -section in the documentation. +The best way to get started is the [Quickstart Guide](https://cloudnative-pg.io/documentation/current/quickstart/). ## Scope -The goal of CloudNativePG is to increase the adoption of PostgreSQL, one of the -most loved DBMS in traditional VM and bare metal environments, inside -Kubernetes, thus making the database an integral part of the development -process and GitOps CI/CD automated pipelines. - -### In scope - -CloudNativePG has been designed by Postgres experts with Kubernetes -administrators in mind. Put simply, it leverages Kubernetes by extending its -controller and by defining, in a programmatic way, all the actions that a good -DBA would normally do when managing a highly available PostgreSQL database -cluster. - -Since the inception, our philosophy has been to adopt a Kubernetes native -approach to PostgreSQL cluster management, making incremental decisions that -would answer the fundamental question: "What would a Kubernetes user expect -from a Postgres operator?". - -The most important decision we made is to have the status of a PostgreSQL -cluster directly available in the `Cluster` resource, so to inspect it through -the Kubernetes API. We've fully embraced the operator pattern and eventual -consistency, two of the core principles upon which Kubernetes is built for -managing complex applications. - -As a result, the operator is responsible for managing the status of the -`Cluster` resource, keeping it up to date with the information that each -PostgreSQL instance manager regularly reports back through the API server. -Changes to the cluster status might trigger, for example, actions like: - -* a PostgreSQL failover where, after an unexpected failure of a cluster's - primary instance, the operator itself elects the new primary, updates the - status, and directly coordinates the operation through the reconciliation - loop, by relying on the instance managers - -* scaling up or down the number of read-only replicas, based on a positive or - negative variation in the number of desired instances in the cluster, so that - the operator creates or removes the required resources to run PostgreSQL, - such as persistent volumes, persistent volume claims, pods, secrets, config - maps, and then coordinates cloning and streaming replication tasks - -* updates of the endpoints of the PostgreSQL services that applications rely on - to interact with the database, as Kubernetes represents the single source of - truth and authority - -* updates of container images in a rolling fashion, following a change in the - image name, by first updating the pods where replicas are running, and then - the primary, issuing a switchover first - -The latter example is based on another pillar of CloudNativePG: -immutable application containers - as explained in the -[blog article "Why EDB Chose Immutable Application Containers"](https://www.enterprisedb.com/blog/why-edb-chose-immutable-application-containers). 
- -The above list can be extended. However, the gist is that CloudNativePG -exclusively relies on the Kubernetes API server and the instance manager to -coordinate the complex operations that need to take place in a business -continuity PostgreSQL cluster, without requiring any assistance from an -intermediate management tool responsible for high availability and failover -management like similar open source operators. - -CloudNativePG also manages additional resources to help the `Cluster` resource -manage PostgreSQL - currently `Backup`, `ClusterImageCatalog`, `ImageCatalog`, -`Pooler`, and `ScheduledBackup`. - -Fully embracing Kubernetes means adopting a hands-off approach during temporary -failures of the Kubernetes API server. In such instances, the operator refrains -from taking action, deferring decisions until the API server is operational -again. Meanwhile, Postgres instances persist, maintaining operations based on -the latest known state of the cluster. - -### Out of scope - -CloudNativePG is exclusively focused on the PostgreSQL database management -system maintained by the PostgreSQL Global Development Group (PGDG). We are not -currently considering adding to CloudNativePG extensions or capabilities that -are included in forks of the PostgreSQL database management system, unless in -the form of extensible or pluggable frameworks. [The operator itself can be extended -via a plugin interface called CNPG-I](https://github.com/cloudnative-pg/cnpg-i). - -CloudNativePG doesn't intend to pursue database independence (e.g. control a -MariaDB cluster). +### Mission + +CloudNativePG aims to increase PostgreSQL adoption within Kubernetes by making +it an integral part of the development process and GitOps-driven CI/CD +automation. + +### Core Principles & Features + +Designed by PostgreSQL experts for Kubernetes administrators, CloudNativePG +follows a Kubernetes-native approach to PostgreSQL primary/standby cluster +management. Instead of relying on external high-availability tools (like +Patroni, repmgr, or Stolon), it integrates directly with the Kubernetes API to +automate database operations that a skilled DBA would perform manually. + +Key design decisions include: + +- Direct integration with Kubernetes API: The PostgreSQL cluster’s status is + available directly in the `Cluster` resource, allowing users to inspect it + via the Kubernetes API. +- Operator pattern: The operator ensures that the desired PostgreSQL state is + reconciled automatically, following Kubernetes best practices. +- Immutable application containers: Updates follow an immutable infrastructure + model, as explained in + ["Why EDB Chose Immutable Application Containers"](https://www.enterprisedb.com/blog/why-edb-chose-immutable-application-containers). + +### How CloudNativePG Works + +The operator continuously monitors and updates the PostgreSQL cluster state. +Examples of automated actions include: + +- Failover management: If the primary instance fails, the operator elects a new + primary, updates the cluster status, and orchestrates the transition. +- Scaling read replicas: When the number of desired replicas changes, the + operator provisions or removes resources such as persistent volumes, secrets, + and config maps while managing streaming replication. +- Service updates: Kubernetes remains the single source of truth, ensuring + that PostgreSQL service endpoints are always up to date. 
+- Rolling updates: When an image is updated, the operator follows a rolling + strategy—first updating replica pods before performing a controlled + switchover for the primary. + +CloudNativePG manages additional Kubernetes resources to enhance PostgreSQL +management, including: `Backup`, `ClusterImageCatalog`, `Database`, +`ImageCatalog`, `Pooler`, `Publication`, `ScheduledBackup`, and `Subscription`. + +## Out of Scope + +- **Kubernetes only:** CloudNativePG is dedicated to vanilla Kubernetes + maintained by the [Cloud Native Computing Foundation + (CNCF)](https://kubernetes.io/). +- **PostgreSQL only:** CloudNativePG is dedicated to vanilla PostgreSQL + maintained by the [PostgreSQL Global Development Group + (PGDG)](https://www.postgresql.org/about/). +- **No support for forks:** Features from PostgreSQL forks will only be + considered if they can be integrated as extensions or pluggable frameworks. +- **Not a general-purpose database operator:** CloudNativePG does not support + other databases (e.g., MariaDB). + +CloudNativePG can be extended via the [CNPG-I plugin interface](https://github.com/cloudnative-pg/cnpg-i). ## Communications -- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g) - [Github Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions) +- [Slack](https://cloud-native.slack.com/archives/C08MAUJ7NPM) + (join the [CNCF Slack Workspace](https://communityinviter.com/apps/cloud-native/cncf)). - [Twitter](https://twitter.com/CloudNativePg) +- [Mastodon](https://mastodon.social/@CloudNativePG) +- [Bluesky](https://bsky.app/profile/cloudnativepg.bsky.social) ## Resources @@ -132,14 +114,17 @@ organization to this list! ### CloudNativePG at KubeCon -- March 21 2024, KubeCon Europe 2024 in Paris: ["Scaling Heights: Mastering Postgres Database Vertical Scalability with Kubernetes Storage Magic"](https://kccnceu2024.sched.com/event/1YeM4/scaling-heights-mastering-postgres-database-vertical-scalability-with-kubernetes-storage-magic-gabriele-bartolini-edb-gari-singh-google) (Gari Singh, Google & Gabriele Bartolini, EDB) -- March 19 2024, Data on Kubernetes Day at KubeCon Europe 2024 in Paris: ["From Zero to Hero: Scaling Postgres in Kubernetes Using the Power of CloudNativePG"](https://colocatedeventseu2024.sched.com/event/1YFha/from-zero-to-hero-scaling-postgres-in-kubernetes-using-the-power-of-cloudnativepg-gabriele-bartolini-edb) (Gabriele Bartolini, EDB) -- 7 November 2023, KubeCon North America 2023 in Chicago: ["Disaster Recovery with Very Large Postgres Databases (in Kubernetes)"](https://kccncna2023.sched.com/event/1R2ml/disaster-recovery-with-very-large-postgres-databases-gabriele-bartolini-edb-michelle-au-google) (Michelle Au, Google & Gabriele Bartolini, EDB) -- 27 October 2022, KubeCon North America 2022 in Detroit: ["Data On Kubernetes, Deploying And Running PostgreSQL And Patterns For Databases In a Kubernetes Cluster"](https://kccncna2022.sched.com/event/182GB/data-on-kubernetes-deploying-and-running-postgresql-and-patterns-for-databases-in-a-kubernetes-cluster-chris-milsted-ondat-gabriele-bartolini-edb) (Chris Milsted, Ondat & Gabriele Bartolini, EDB) +- April 4 2025, KubeCon Europe in London: ["Consistent Volume Group Snapshots, Unraveling the Magic"](https://sched.co/1tx8g) - Leonardo Cecchi (EDB) and Xing Yang (VMware) +- November 11 2024, Cloud Native Rejekts NA 2024: ["Maximising Microservice Databases with Kubernetes, Postgres, and 
CloudNativePG"](https://www.youtube.com/watch?v=uBzl_stoxoc&ab_channel=CloudNativeRejekts) - Gabriele Bartolini (EDB) and Leonardo Cecchi (EDB) +- March 21 2024, KubeCon Europe 2024 in Paris: ["Scaling Heights: Mastering Postgres Database Vertical Scalability with Kubernetes Storage Magic"](https://kccnceu2024.sched.com/event/1YeM4/scaling-heights-mastering-postgres-database-vertical-scalability-with-kubernetes-storage-magic-gabriele-bartolini-edb-gari-singh-google) - Gari Singh, Google & Gabriele Bartolini, EDB +- March 19 2024, Data on Kubernetes Day at KubeCon Europe 2024 in Paris: ["From Zero to Hero: Scaling Postgres in Kubernetes Using the Power of CloudNativePG"](https://colocatedeventseu2024.sched.com/event/1YFha/from-zero-to-hero-scaling-postgres-in-kubernetes-using-the-power-of-cloudnativepg-gabriele-bartolini-edb) - Gabriele Bartolini, EDB +- 7 November 2023, KubeCon North America 2023 in Chicago: ["Disaster Recovery with Very Large Postgres Databases (in Kubernetes)"](https://kccncna2023.sched.com/event/1R2ml/disaster-recovery-with-very-large-postgres-databases-gabriele-bartolini-edb-michelle-au-google) - Michelle Au, Google & Gabriele Bartolini, EDB +- 27 October 2022, KubeCon North America 2022 in Detroit: ["Data On Kubernetes, Deploying And Running PostgreSQL And Patterns For Databases In a Kubernetes Cluster"](https://kccncna2022.sched.com/event/182GB/data-on-kubernetes-deploying-and-running-postgresql-and-patterns-for-databases-in-a-kubernetes-cluster-chris-milsted-ondat-gabriele-bartolini-edb) - Chris Milsted, Ondat & Gabriele Bartolini, EDB ### Useful links - [Data on Kubernetes (DoK) Community](https://dok.community/) +- ["Cloud Neutral Postgres Databases with Kubernetes and CloudNativePG" by Gabriele Bartolini](https://www.cncf.io/blog/2024/11/20/cloud-neutral-postgres-databases-with-kubernetes-and-cloudnativepg/) (November 2024) - ["How to migrate your PostgreSQL database in Kubernetes with ~0 downtime from anywhere" by Gabriele Bartolini](https://gabrielebartolini.it/articles/2024/03/cloudnativepg-recipe-5-how-to-migrate-your-postgresql-database-in-kubernetes-with-~0-downtime-from-anywhere/) (March 2024) - ["Maximizing Microservice Databases with Kubernetes, Postgres, and CloudNativePG" by Gabriele Bartolini](https://gabrielebartolini.it/articles/2024/02/maximizing-microservice-databases-with-kubernetes-postgres-and-cloudnativepg/) (February 2024) - ["Recommended Architectures for PostgreSQL in Kubernetes" by Gabriele Bartolini](https://www.cncf.io/blog/2023/09/29/recommended-architectures-for-postgresql-in-kubernetes/) (September 2023) @@ -149,18 +134,52 @@ organization to this list! - ["Shift-Left Security: The Path To PostgreSQL On Kubernetes" by Gabriele Bartolini](https://www.tfir.io/shift-left-security-the-path-to-postgresql-on-kubernetes/) (April 2021) - ["Local Persistent Volumes and PostgreSQL usage in Kubernetes" by Gabriele Bartolini](https://www.2ndquadrant.com/en/blog/local-persistent-volumes-and-postgresql-usage-in-kubernetes/) (June 2020) -## Star History +--- + +

+We are a Cloud Native Computing Foundation Sandbox project. +

+ +

+ + + + CNCF logo + +

-[![Star History Chart](https://api.star-history.com/svg?repos=cloudnative-pg/cloudnative-pg&type=Date)](https://star-history.com/#cloudnative-pg/cloudnative-pg&Date) +--- -## Trademarks +

+CloudNativePG was originally built and sponsored by EDB. +

-*[Postgres, PostgreSQL and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/) +

+ + + + EDB logo + +

+ +--- + +

+Postgres, PostgreSQL, and the Slonik Logo are trademarks or registered trademarks of the PostgreSQL Community Association -of Canada, and used with their permission.* +of Canada, and used with their permission. +

+ +--- +[cncf-landscape]: https://landscape.cncf.io/?item=app-definition-and-development--database--cloudnativepg [stackoverflow]: https://stackoverflow.com/questions/tagged/cloudnative-pg [latest-release]: https://github.com/cloudnative-pg/cloudnative-pg/releases/latest [documentation]: https://cloudnative-pg.io/documentation/current/ [license]: https://github.com/cloudnative-pg/cloudnative-pg?tab=Apache-2.0-1-ov-file#readme +[openssf]: https://www.bestpractices.dev/projects/9933 +[openssf-scorecard-badge]: https://api.scorecard.dev/projects/github.com/cloudnative-pg/cloudnative-pg/badge +[openssf-socrecard-view]: https://scorecard.dev/viewer/?uri=github.com/cloudnative-pg/cloudnative-pg [documentation-badge]: https://img.shields.io/badge/Documentation-white?logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAGN0lEQVR4nJRXXWwcVxU%2B8%2F%2BzP%2BPZtR2v7dqy07jUJUALNaiK6lZyUVVKWgGKaIv8QCMekBAVQlQICcEzVZFQVYFKQhASEBHlISJPCRJEshTFChgrIYHEiYMh69jetffHM7Mzc%2B9Bs7vjnTs7yZpZWbt37s%2F5zne%2Bc861CD0eXRkbHc3NfjeffvxNAGEAgULD2756v35%2B3qe1Nc4fnQVEXlA2LnOcXlCF8S%2B6vvVgq%2FL3M65X3e51PvfQCU4WJgZe%2B8GQ8fS7AKgjBB8KEHwjDXZSjkf0CREAaXM2eI9c65siqWxWl360Xl74ANHz%2Fy8AitxnTBfmz%2BhyYS4wGhwObQCIHSA0AigOMBzvOsXzd4pnjyL6NMmWEH8hi2b28Og3%2FqRJA0ewfQy0v1vGO2NovwPo%2FEU%2FwVgSU1PI%2BSu79v3lJAB8HM%2BTI%2FO%2FUUXzM4xHIe0xI4DdRqOAwnF%2F38ePPyzaDIDh%2FMxcWh462m08aojuGY97C0nrAEHg9BlF0fmeAPr0J15vbaKsp0BZQzEDEAlP9B209UIIVXUta%2FQEQHwxgxFjTc%2BRskAwrgVWmHtg22vMPJwLDqGUNJIAMHVAkGu3WdpZz6NAkgSXpINSycluV28er1a3rJ4M3F2%2F9AtCvXKycRrTQttrjINjxxxIL9jevxdaDHU%2FTBr6pL5ruzuLZubgUQBOY2hPij3GBUe7tBCMBRE2KrXVSz0BBI%2FtPVgtV%2F%2FxkZ5WSjI%2F%2BFIXC3sHJwgT4yFqrZFFTSlVrp3sGYLwcfxSmXCbS00j2Ms4K7qkOsFx6qdTuiHtG4AimfmM8NyvOvR2G48qXtZ2fsfrN7%2BqpcRyUp0glKiimDm4TwAcHBp%2B9WeA4ki0GMWNR9OVF8BZvn7xtI%2FF09H8jzLEgz6yLwCDuelnFXHkTZZOytCOEdqDOtGwsm%2BNj00fXt%2B6%2Bj4vcA7bwNrZwENmXwAKuZnvsNRThs5ozMPfPiHyoDF7xiduHcXb70A8dRFheHjiySQATBZk0nl9MHPkBEWUoEtYjyrPFNwGzfdlD37Zdu98KCv%2BMmD2BYpUCvcST39e0%2BS1Wr249FAAg7mPzWrS5NstEbE0xrsiA6QN1PfRFLnhr%2BspxVJTlY8Mw1DqNXeyCQFREEXz9cHB0QOev73QaNhOF4B%2B45PHFHFgDhJTqjuubJFqX1KQco7NTTuW8kq95k2G4eLEGzM7lfItnjNeTKcOfV%2FT8hOuV77A9IK0XjgMpCO0ZiuV3L%2F6njCFAOmucGB3OII5XgCXEJTDdZLElVbu3Vz0fWexvL30k0B6ggBACOmIUBAEUKX0dDTvW7RCYcdZPq6n%2FSsQnUO2RuyBRgQ9Rc5mMvJ6CNIj1nXfd9qWAsCkaZzJAk1L8UjVqY737dSjfCGrPHWqXL32Q0mB%2F2BXnke00WaEYv2aTzAbnuV5pcWkDGAAGJmhSafh6hjr%2BW2SVYHrP7bb%2BOdPW%2FUgflGlTM2gaK%2Ft7tp6%2BN6yixdN89DcIwGktIFPABfNbwoQqQWEUnDJzg1g0jDeK5p7Kp7nensXFI7uyAr%2FLyM7fYLnpa6LYScE8vDnot5hrKlslm%2BfE3nVxJgO4o3KcYu%2FF8XM8yFQ27n%2F65Te%2FzKl3Jhpjj6TCIDneRD5%2FItxr1vdkALw7p1qfeWPpjHxMtsXaPxu6FLc%2BrnbSB1r7fcrlr36nqwMzQfnplJDryQCGOh%2FbLjhcM%2FEvQ4Pdund9xRV5m1LfTXaF%2BK9gsLGB9nsgddcz8thM%2FarPzYM8%2FFazf9sMFaU%2Fi%2FwvNANwEhPvUGR8ozn7d%2BiDKXixtKpbHp81nV9E7puRy31ixKUbOe%2Fv3Ud891ghhDrL5Z975eaOvV%2BCNRp0Gfz%2BcJjDABdTwlpdfKbId0t5XYAcHz5D5ZVtWUp9%2Flog2L7PgVJqZx0HOE5Cqghemv1%2Bt%2FeGBmZ%2BdB2yNN72UEpnzXG32YADA186i3bIpPxMhuKrFK%2Fd77JUnbkKbYvRJlC8DzKSZK76Lq1he2dKy%2BZuSfesSz5a2xHDbLJ%2BJaqdv5H4EUY%2BzbG2m9HgN7mg81bfw4W1uu7AjvHaqDhqF%2FZ3Fq5XFy%2FcESSDsx5fvZ7wLEsNfXk%2BjlVHfpSCOB%2FAQAA%2F%2F8zd8orZc2N9AAAAABJRU5ErkJggg%3D%3D +[fossa-badge]: https://app.fossa.com/api/projects/git%2Bgithub.com%2Fcloudnative-pg%2Fcloudnative-pg.svg?type=small +[fossa]: https://app.fossa.com/projects/git%2Bgithub.com%2Fcloudnative-pg%2Fcloudnative-pg?ref=badge_small diff --git a/SUPPORT.md b/SUPPORT.md new file mode 100644 index 0000000000..06f152bdbf --- /dev/null +++ b/SUPPORT.md @@ -0,0 +1,30 @@ +# Commercial 
Support for CloudNativePG + +CloudNativePG is an independent open-source project and does not officially +endorse any specific company or service provider. + +However, to assist users in finding professional support, the +"[Commercial Support](https://cloudnative-pg.io/support/)" +page offers an alphabetical list of companies and individuals providing +CloudNativePG-related products or services. + +*Please note that the CloudNativePG authors are not responsible for the accuracy +or content provided by the listed companies or individuals.* + +## How to Get Listed + +To have your company or personal services featured on this list, please submit +a [pull request to the CloudNativePG website](https://github.com/cloudnative-pg/cloudnative-pg.github.io) +by adding a `.md` file in the [`content/support` folder](https://github.com/cloudnative-pg/cloudnative-pg.github.io/tree/main/content/support) +containing the following information: + +1. **Organisation Name**: Clearly specify the name of your company or entity. +2. **Organisation Logo**: Provide your company logo in SVG format. +3. **Website Link**: Include a link to your homepage or a dedicated landing + page that explicitly mentions CloudNativePG support and includes at least one + link back to [cloudnative-pg.io](https://cloudnative-pg.io). + +[CloudNativePG maintainers will vet each submission](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md#voting) +and reserve the right to reject your application or request changes if your website +doesn’t clearly mention CloudNativePG support or if it doesn't include at least +one link back to [cloudnative-pg.io](https://cloudnative-pg.io). diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go new file mode 100644 index 0000000000..cdce7b6de3 --- /dev/null +++ b/api/v1/backup_funcs.go @@ -0,0 +1,304 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package v1
+
+import (
+	"context"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/utils/ptr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// SetAsFailed marks a certain backup as failed
+func (backupStatus *BackupStatus) SetAsFailed(
+	err error,
+) {
+	backupStatus.Phase = BackupPhaseFailed
+
+	if err != nil {
+		backupStatus.Error = err.Error()
+	} else {
+		backupStatus.Error = ""
+	}
+}
+
+// SetAsFinalizing marks a certain backup as finalizing
+func (backupStatus *BackupStatus) SetAsFinalizing() {
+	backupStatus.Phase = BackupPhaseFinalizing
+	backupStatus.Error = ""
+}
+
+// SetAsCompleted marks a certain backup as completed
+func (backupStatus *BackupStatus) SetAsCompleted() {
+	backupStatus.Phase = BackupPhaseCompleted
+	backupStatus.Error = ""
+	backupStatus.StoppedAt = ptr.To(metav1.Now())
+}
+
+// SetAsStarted marks a certain backup as started
+func (backupStatus *BackupStatus) SetAsStarted(podName, containerID string, method BackupMethod) {
+	backupStatus.Phase = BackupPhaseStarted
+	backupStatus.InstanceID = &InstanceID{
+		PodName:     podName,
+		ContainerID: containerID,
+	}
+	backupStatus.Method = method
+}
+
+// SetSnapshotElements sets the Snapshots field from a list of VolumeSnapshot
+func (snapshotStatus *BackupSnapshotStatus) SetSnapshotElements(snapshots []volumesnapshot.VolumeSnapshot) {
+	snapshotNames := make([]BackupSnapshotElementStatus, len(snapshots))
+	for idx, volumeSnapshot := range snapshots {
+		snapshotNames[idx] = BackupSnapshotElementStatus{
+			Name:           volumeSnapshot.Name,
+			Type:           volumeSnapshot.Annotations[utils.PvcRoleLabelName],
+			TablespaceName: volumeSnapshot.Labels[utils.TablespaceNameLabelName],
+		}
+	}
+	snapshotStatus.Elements = snapshotNames
+}
+
+// IsDone checks whether a backup has reached a final state,
+// that is, either completed or failed
+func (backupStatus *BackupStatus) IsDone() bool {
+	return backupStatus.Phase == BackupPhaseCompleted || backupStatus.Phase == BackupPhaseFailed
+}
+
+// GetOnline tells whether this backup was taken while the database
+// was up
+func (backupStatus *BackupStatus) GetOnline() bool {
+	if backupStatus.Online == nil {
+		return false
+	}
+
+	return *backupStatus.Online
+}
+
+// GetVolumeSnapshotDeadline returns the volume snapshot deadline, read
+// in minutes from the dedicated annotation (defaulting to 10 minutes)
+func (backup *Backup) GetVolumeSnapshotDeadline() time.Duration {
+	const defaultValue = 10
+
+	value := backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]
+	if value == "" {
+		return defaultValue * time.Minute
+	}
+
+	minutes, err := strconv.Atoi(value)
+	if err != nil {
+		return defaultValue * time.Minute
+	}
+
+	return time.Duration(minutes) * time.Minute
+}
+
+// IsCompletedVolumeSnapshot checks if a backup is completed using the volume snapshot method.
+// It returns true if the backup's method is BackupMethodVolumeSnapshot and its status phase is BackupPhaseCompleted.
+// Otherwise, it returns false.
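+//
+// A minimal usage sketch (hypothetical caller, for illustration only):
+//
+//	if backup.IsCompletedVolumeSnapshot() {
+//		// the snapshot set recorded in the backup status is final here,
+//		// so it is safe to act on it (e.g. label the snapshots)
+//	}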
+func (backup *Backup) IsCompletedVolumeSnapshot() bool {
+	return backup != nil &&
+		backup.Spec.Method == BackupMethodVolumeSnapshot &&
+		backup.Status.Phase == BackupPhaseCompleted
+}
+
+// IsInProgress checks whether a certain backup is in progress or not
+func (backupStatus *BackupStatus) IsInProgress() bool {
+	return backupStatus.Phase == BackupPhasePending ||
+		backupStatus.Phase == BackupPhaseStarted ||
+		backupStatus.Phase == BackupPhaseRunning
+}
+
+// GetPendingBackupNames returns the pending backup list
+func (list BackupList) GetPendingBackupNames() []string {
+	// Collect the backups that are neither done nor in progress
+	pendingBackups := make([]string, 0, len(list.Items))
+	for _, concurrentBackup := range list.Items {
+		if concurrentBackup.Status.IsDone() {
+			continue
+		}
+		if !concurrentBackup.Status.IsInProgress() {
+			pendingBackups = append(pendingBackups, concurrentBackup.Name)
+		}
+	}
+
+	return pendingBackups
+}
+
+// CanExecuteBackup controls whether we can start a reconciliation loop for a certain backup.
+//
+// A reconciliation loop can start if:
+// - no backup is running and this backup is the first pending one in the sorted list, or
+// - the current backup is already running and is the first running backup of the list
+//
+// As a side effect, this function will sort the backup list
+func (list *BackupList) CanExecuteBackup(backupName string) bool {
+	var foundRunningBackup bool
+
+	list.SortByName()
+
+	for _, concurrentBackup := range list.Items {
+		if concurrentBackup.Status.IsInProgress() {
+			if backupName == concurrentBackup.Name && !foundRunningBackup {
+				return true
+			}
+
+			foundRunningBackup = true
+			if backupName != concurrentBackup.Name {
+				return false
+			}
+		}
+	}
+
+	pendingBackups := list.GetPendingBackupNames()
+	if len(pendingBackups) > 0 && pendingBackups[0] != backupName {
+		return false
+	}
+
+	return true
+}
+
+// SortByName sorts the backup items in alphabetical order
+func (list *BackupList) SortByName() {
+	// Sort the list of backups in alphabetical order
+	sort.Slice(list.Items, func(i, j int) bool {
+		return strings.Compare(list.Items[i].Name, list.Items[j].Name) <= 0
+	})
+}
+
+// SortByReverseCreationTime sorts the backup items in reverse creation time (starting from the latest one)
+func (list *BackupList) SortByReverseCreationTime() {
+	// Sort the list of backups in reverse creation time
+	sort.Slice(list.Items, func(i, j int) bool {
+		return list.Items[i].CreationTimestamp.Compare(list.Items[j].CreationTimestamp.Time) > 0
+	})
+}
+
+// GetStatus gets the backup status
+func (backup *Backup) GetStatus() *BackupStatus {
+	return &backup.Status
+}
+
+// GetMetadata gets the metadata
+func (backup *Backup) GetMetadata() *metav1.ObjectMeta {
+	return &backup.ObjectMeta
+}
+
+// GetName gets the backup name
+func (backup *Backup) GetName() string {
+	return backup.Name
+}
+
+// GetNamespace gets the backup namespace
+func (backup *Backup) GetNamespace() string {
+	return backup.Namespace
+}
+
+// GetAssignedInstance fetches the instance that was assigned to the backup execution
+func (backup *Backup) GetAssignedInstance(ctx context.Context, cli client.Client) (*corev1.Pod, error) {
+	if backup.Status.InstanceID == nil || len(backup.Status.InstanceID.PodName) == 0 {
+		return nil, nil
+	}
+
+	var previouslyElectedPod corev1.Pod
+	if err := cli.Get(
+		ctx,
+		client.ObjectKey{Namespace: backup.Namespace, Name: backup.Status.InstanceID.PodName},
+		&previouslyElectedPod,
+	); err != nil {
+		return nil, err
+	}
+
+	return &previouslyElectedPod, nil
+}
+
+// GetOnlineOrDefault returns
the online value for the backup. +func (backup *Backup) GetOnlineOrDefault(cluster *Cluster) bool { + // Offline backups are supported only with the + // volume snapshot backup method. + if backup.Spec.Method != BackupMethodVolumeSnapshot { + return true + } + + if backup.Spec.Online != nil { + return *backup.Spec.Online + } + + if cluster.Spec.Backup == nil || cluster.Spec.Backup.VolumeSnapshot == nil { + return true + } + + config := backup.GetVolumeSnapshotConfiguration(*cluster.Spec.Backup.VolumeSnapshot) + if config.Online != nil { + return *config.Online + } + + return true +} + +// GetVolumeSnapshotConfiguration overrides the configuration value with the ones specified +// in the backup, if present. +func (backup *Backup) GetVolumeSnapshotConfiguration( + clusterConfig VolumeSnapshotConfiguration, +) VolumeSnapshotConfiguration { + config := clusterConfig + if backup.Spec.Online != nil { + config.Online = backup.Spec.Online + } + + if backup.Spec.OnlineConfiguration != nil { + config.OnlineConfiguration = *backup.Spec.OnlineConfiguration + } + + return config +} + +// EnsureGVKIsPresent ensures that the GroupVersionKind (GVK) metadata is present in the Backup object. +// This is necessary because informers do not automatically include metadata inside the object. +// By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object. +func (backup *Backup) EnsureGVKIsPresent() { + backup.SetGroupVersionKind(schema.GroupVersionKind{ + Group: SchemeGroupVersion.Group, + Version: SchemeGroupVersion.Version, + Kind: BackupKind, + }) +} + +// IsEmpty checks if the plugin configuration is empty or not +func (configuration *BackupPluginConfiguration) IsEmpty() bool { + return configuration == nil || len(configuration.Name) == 0 +} + +// IsManagedByInstance returns true if the backup is managed by the instance manager +func (b BackupMethod) IsManagedByInstance() bool { + return b == BackupMethodPlugin || b == BackupMethodBarmanObjectStore +} + +// IsManagedByOperator returns true if the backup is managed by the operator +func (b BackupMethod) IsManagedByOperator() bool { + return b == BackupMethodVolumeSnapshot +} diff --git a/api/v1/backup_types_test.go b/api/v1/backup_funcs_test.go similarity index 98% rename from api/v1/backup_types_test.go rename to api/v1/backup_funcs_test.go index 3828a1eac7..d48fbc8b35 100644 --- a/api/v1/backup_types_test.go +++ b/api/v1/backup_funcs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go index 2701b47502..e5b9ea2b36 100644 --- a/api/v1/backup_types.go +++ b/api/v1/backup_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
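The precedence applied by `GetOnlineOrDefault` above is worth spelling out. Here is a minimal sketch under stated assumptions: `ClusterSpec` and `BackupConfiguration` are type names taken from the wider API and not shown in this diff, and the values are invented for illustration.

```go
// Hypothetical illustration of GetOnlineOrDefault's precedence:
// an explicit backup.Spec.Online wins over the cluster-level
// volumeSnapshot configuration, and everything else defaults to online.
backup := &Backup{
	Spec: BackupSpec{
		Method: BackupMethodVolumeSnapshot,
		// Online is left nil, so the cluster configuration is consulted
	},
}
cluster := &Cluster{
	Spec: ClusterSpec{ // assumed type name, defined outside this diff
		Backup: &BackupConfiguration{ // assumed type name, defined outside this diff
			VolumeSnapshot: &VolumeSnapshotConfiguration{
				Online: ptr.To(false), // cluster-wide preference for cold backups
			},
		},
	},
}
online := backup.GetOnlineOrDefault(cluster) // false, taken from the cluster configuration
_ = online
```

For any method other than `volumeSnapshot`, the helper short-circuits to `true`, since offline backups are only supported with volume snapshots.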
@@ -12,23 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 import ( - "context" - "sort" - "strings" - barmanApi "github.com/cloudnative-pg/barman-cloud/pkg/api" - volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // BackupPhase is the phase of the backup @@ -126,6 +119,7 @@ const ( ) // BackupSpec defines the desired state of Backup +// +kubebuilder:validation:XValidation:rule="oldSelf == self",message="BackupSpec is immutable once set" type BackupSpec struct { // The cluster to backup Cluster LocalObjectReference `json:"cluster"` @@ -192,6 +186,7 @@ type BackupSnapshotElementStatus struct { // TablespaceName is the name of the snapshotted tablespace. Only set // when type is PG_TABLESPACE + // +optional TablespaceName string `json:"tablespaceName,omitempty"` } @@ -295,7 +290,12 @@ type BackupStatus struct { Method BackupMethod `json:"method,omitempty"` // Whether the backup was online/hot (`true`) or offline/cold (`false`) + // +optional Online *bool `json:"online,omitempty"` + + // A map containing the plugin metadata + // +optional + PluginMetadata map[string]string `json:"pluginMetadata,omitempty"` } // InstanceID contains the information to identify an instance @@ -318,7 +318,7 @@ type InstanceID struct { // +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase" // +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error" -// Backup is the Schema for the backups API +// A Backup resource is a request for a PostgreSQL backup by the user. type Backup struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata"` @@ -340,216 +340,12 @@ type BackupList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of backups Items []Backup `json:"items"` } -// SetAsFailed marks a certain backup as invalid -func (backupStatus *BackupStatus) SetAsFailed( - err error, -) { - backupStatus.Phase = BackupPhaseFailed - - if err != nil { - backupStatus.Error = err.Error() - } else { - backupStatus.Error = "" - } -} - -// SetAsFinalizing marks a certain backup as finalizing -func (backupStatus *BackupStatus) SetAsFinalizing() { - backupStatus.Phase = BackupPhaseFinalizing - backupStatus.Error = "" -} - -// SetAsCompleted marks a certain backup as completed -func (backupStatus *BackupStatus) SetAsCompleted() { - backupStatus.Phase = BackupPhaseCompleted - backupStatus.Error = "" - backupStatus.StoppedAt = ptr.To(metav1.Now()) -} - -// SetAsStarted marks a certain backup as started -func (backupStatus *BackupStatus) SetAsStarted(podName, containerID string, method BackupMethod) { - backupStatus.Phase = BackupPhaseStarted - backupStatus.InstanceID = &InstanceID{ - PodName: podName, - ContainerID: containerID, - } - backupStatus.Method = method -} - -// SetSnapshotElements sets the Snapshots field from a list of VolumeSnapshot -func (snapshotStatus *BackupSnapshotStatus) SetSnapshotElements(snapshots []volumesnapshot.VolumeSnapshot) { - snapshotNames := make([]BackupSnapshotElementStatus, len(snapshots)) - for idx, volumeSnapshot := range snapshots { - snapshotNames[idx] = BackupSnapshotElementStatus{ - Name: volumeSnapshot.Name, - Type: volumeSnapshot.Annotations[utils.PvcRoleLabelName], - TablespaceName: volumeSnapshot.Labels[utils.TablespaceNameLabelName], - } - } - snapshotStatus.Elements = snapshotNames -} - -// IsDone check if a backup is completed or still in progress -func (backupStatus *BackupStatus) IsDone() bool { - return backupStatus.Phase == BackupPhaseCompleted || backupStatus.Phase == BackupPhaseFailed -} - -// GetOnline tells whether this backup was taken while the database -// was up -func (backupStatus *BackupStatus) GetOnline() bool { - if backupStatus.Online == nil { - return false - } - - return *backupStatus.Online -} - -// IsCompletedVolumeSnapshot checks if a backup is completed using the volume snapshot method. -// It returns true if the backup's method is BackupMethodVolumeSnapshot and its status phase is BackupPhaseCompleted. -// Otherwise, it returns false. -func (backup *Backup) IsCompletedVolumeSnapshot() bool { - return backup != nil && - backup.Spec.Method == BackupMethodVolumeSnapshot && - backup.Status.Phase == BackupPhaseCompleted -} - -// IsInProgress check if a certain backup is in progress or not -func (backupStatus *BackupStatus) IsInProgress() bool { - return backupStatus.Phase == BackupPhasePending || - backupStatus.Phase == BackupPhaseStarted || - backupStatus.Phase == BackupPhaseRunning -} - -// GetPendingBackupNames returns the pending backup list -func (list BackupList) GetPendingBackupNames() []string { - // Retry the backup if another backup is running - pendingBackups := make([]string, 0, len(list.Items)) - for _, concurrentBackup := range list.Items { - if concurrentBackup.Status.IsDone() { - continue - } - if !concurrentBackup.Status.IsInProgress() { - pendingBackups = append(pendingBackups, concurrentBackup.Name) - } - } - - return pendingBackups -} - -// CanExecuteBackup control if we can start a reconciliation loop for a certain backup. 
-// -// A reconciliation loop can start if: -// - there's no backup running, and if the first of the sorted list of backups -// - the current backup is running and is the first running backup of the list -// -// As a side effect, this function will sort the backup list -func (list *BackupList) CanExecuteBackup(backupName string) bool { - var foundRunningBackup bool - - list.SortByName() - - for _, concurrentBackup := range list.Items { - if concurrentBackup.Status.IsInProgress() { - if backupName == concurrentBackup.Name && !foundRunningBackup { - return true - } - - foundRunningBackup = true - if backupName != concurrentBackup.Name { - return false - } - } - } - - pendingBackups := list.GetPendingBackupNames() - if len(pendingBackups) > 0 && pendingBackups[0] != backupName { - return false - } - - return true -} - -// SortByName sorts the backup items in alphabetical order -func (list *BackupList) SortByName() { - // Sort the list of backups in alphabetical order - sort.Slice(list.Items, func(i, j int) bool { - return strings.Compare(list.Items[i].Name, list.Items[j].Name) <= 0 - }) -} - -// SortByReverseCreationTime sorts the backup items in reverse creation time (starting from the latest one) -func (list *BackupList) SortByReverseCreationTime() { - // Sort the list of backups in reverse creation time - sort.Slice(list.Items, func(i, j int) bool { - return list.Items[i].CreationTimestamp.Time.Compare(list.Items[j].CreationTimestamp.Time) > 0 - }) -} - -// GetStatus gets the backup status -func (backup *Backup) GetStatus() *BackupStatus { - return &backup.Status -} - -// GetMetadata get the metadata -func (backup *Backup) GetMetadata() *metav1.ObjectMeta { - return &backup.ObjectMeta -} - -// GetName get the backup name -func (backup *Backup) GetName() string { - return backup.Name -} - -// GetNamespace get the backup namespace -func (backup *Backup) GetNamespace() string { - return backup.Namespace -} - -// GetAssignedInstance fetches the instance that was assigned to the backup execution -func (backup *Backup) GetAssignedInstance(ctx context.Context, cli client.Client) (*corev1.Pod, error) { - if backup.Status.InstanceID == nil || len(backup.Status.InstanceID.PodName) == 0 { - return nil, nil - } - - var previouslyElectedPod corev1.Pod - if err := cli.Get( - ctx, - client.ObjectKey{Namespace: backup.Namespace, Name: backup.Status.InstanceID.PodName}, - &previouslyElectedPod, - ); err != nil { - return nil, err - } - - return &previouslyElectedPod, nil -} - -// GetVolumeSnapshotConfiguration overrides the configuration value with the ones specified -// in the backup, if present. 
-func (backup *Backup) GetVolumeSnapshotConfiguration( - clusterConfig VolumeSnapshotConfiguration, -) VolumeSnapshotConfiguration { - config := clusterConfig - if backup.Spec.Online != nil { - config.Online = backup.Spec.Online - } - - if backup.Spec.OnlineConfiguration != nil { - config.OnlineConfiguration = *backup.Spec.OnlineConfiguration - } - - return config -} - -// IsEmpty checks if the plugin configuration is empty or not -func (configuration *BackupPluginConfiguration) IsEmpty() bool { - return configuration == nil || len(configuration.Name) == 0 -} - func init() { SchemeBuilder.Register(&Backup{}, &BackupList{}) } diff --git a/api/v1/backup_webhook.go b/api/v1/backup_webhook.go deleted file mode 100644 index aec1fd54f9..0000000000 --- a/api/v1/backup_webhook.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "github.com/cloudnative-pg/machinery/pkg/log" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// backupLog is for logging in this package. -var backupLog = log.WithName("backup-resource").WithValues("version", "v1") - -// SetupWebhookWithManager setup the webhook inside the controller manager -func (r *Backup) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} - -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-backup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,verbs=create;update,versions=v1,name=mbackup.cnpg.io,sideEffects=None - -var _ webhook.Defaulter = &Backup{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type -func (r *Backup) Default() { - backupLog.Info("default", "name", r.Name, "namespace", r.Namespace) -} - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
-// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-backup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,versions=v1,name=vbackup.cnpg.io,sideEffects=None - -var _ webhook.Validator = &Backup{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *Backup) ValidateCreate() (admission.Warnings, error) { - backupLog.Info("validate create", "name", r.Name, "namespace", r.Namespace) - allErrs := r.validate() - if len(allErrs) == 0 { - return nil, nil - } - - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"}, - r.Name, allErrs) -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *Backup) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { - backupLog.Info("validate update", "name", r.Name, "namespace", r.Namespace) - return r.ValidateCreate() -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *Backup) ValidateDelete() (admission.Warnings, error) { - backupLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace) - return nil, nil -} - -func (r *Backup) validate() field.ErrorList { - var result field.ErrorList - - if r.Spec.Method == BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { - result = append(result, field.Invalid( - field.NewPath("spec", "method"), - r.Spec.Method, - "Cannot use volumeSnapshot backup method due to missing "+ - "VolumeSnapshot CRD. If you installed the CRD after having "+ - "started the operator, please restart it to enable "+ - "VolumeSnapshot support", - )) - } - - if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.Online != nil { - result = append(result, field.Invalid( - field.NewPath("spec", "online"), - r.Spec.Online, - "Online parameter can be specified only if the backup method is volumeSnapshot", - )) - } - - if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil { - result = append(result, field.Invalid( - field.NewPath("spec", "onlineConfiguration"), - r.Spec.OnlineConfiguration, - "OnlineConfiguration parameter can be specified only if the backup method is volumeSnapshot", - )) - } - - if r.Spec.Method == BackupMethodPlugin && r.Spec.PluginConfiguration.IsEmpty() { - result = append(result, field.Invalid( - field.NewPath("spec", "pluginConfiguration"), - r.Spec.OnlineConfiguration, - "cannot be empty when the backup method is plugin", - )) - } - - return result -} diff --git a/api/v1/backup_webhook_test.go b/api/v1/backup_webhook_test.go deleted file mode 100644 index d7de85abe3..0000000000 --- a/api/v1/backup_webhook_test.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/utils/ptr" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Backup webhook validate", func() { - It("doesn't complain if VolumeSnapshot CRD is present", func() { - backup := &Backup{ - Spec: BackupSpec{ - Method: BackupMethodVolumeSnapshot, - }, - } - utils.SetVolumeSnapshot(true) - result := backup.validate() - Expect(result).To(BeEmpty()) - }) - - It("complains if VolumeSnapshot CRD is not present", func() { - backup := &Backup{ - Spec: BackupSpec{ - Method: BackupMethodVolumeSnapshot, - }, - } - utils.SetVolumeSnapshot(false) - result := backup.validate() - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.method")) - }) - - It("complains if online is set on a barman backup", func() { - backup := &Backup{ - Spec: BackupSpec{ - Method: BackupMethodBarmanObjectStore, - Online: ptr.To(true), - }, - } - result := backup.validate() - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.online")) - }) - - It("complains if onlineConfiguration is set on a barman backup", func() { - backup := &Backup{ - Spec: BackupSpec{ - Method: BackupMethodBarmanObjectStore, - OnlineConfiguration: &OnlineConfiguration{}, - }, - } - result := backup.validate() - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) - }) -}) diff --git a/api/v1/base_funcs.go b/api/v1/base_funcs.go new file mode 100644 index 0000000000..09c3698ef4 --- /dev/null +++ b/api/v1/base_funcs.go @@ -0,0 +1,77 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// SecretKeySelectorToCore transforms a SecretKeySelector structure to the +// analogue one in the corev1 namespace +func SecretKeySelectorToCore(selector *SecretKeySelector) *corev1.SecretKeySelector { + if selector == nil { + return nil + } + + return &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: selector.Name, + }, + Key: selector.Key, + } +} + +// ConfigMapKeySelectorToCore transforms a ConfigMapKeySelector structure to the analogue +// one in the corev1 namespace +func ConfigMapKeySelectorToCore(selector *ConfigMapKeySelector) *corev1.ConfigMapKeySelector { + if selector == nil { + return nil + } + + return &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: selector.Name, + }, + Key: selector.Key, + } +} + +// ListStatusPods return a list of active Pods +func ListStatusPods(podList []corev1.Pod) map[PodStatus][]string { + podsNames := make(map[PodStatus][]string) + + for _, pod := range podList { + if !pod.DeletionTimestamp.IsZero() { + continue + } + switch { + case utils.IsPodReady(pod): + podsNames[PodHealthy] = append(podsNames[PodHealthy], pod.Name) + case utils.IsPodActive(pod): + podsNames[PodReplicating] = append(podsNames[PodReplicating], pod.Name) + default: + podsNames[PodFailed] = append(podsNames[PodFailed], pod.Name) + } + } + + return podsNames +} diff --git a/api/v1/base_funcs_test.go b/api/v1/base_funcs_test.go new file mode 100644 index 0000000000..b6f4f83049 --- /dev/null +++ b/api/v1/base_funcs_test.go @@ -0,0 +1,166 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Base type mappings for secrets", func() { + It("correctly map nil values", func() { + Expect(SecretKeySelectorToCore(nil)).To(BeNil()) + }) + + It("correctly map non-nil values", func() { + selector := SecretKeySelector{ + LocalObjectReference: LocalObjectReference{ + Name: "thisName", + }, + Key: "thisKey", + } + + Expect(selector.Name).To(Equal("thisName")) + Expect(selector.Key).To(Equal("thisKey")) + }) +}) + +var _ = Describe("Base type mappings for configmaps", func() { + It("correctly map nil values", func() { + Expect(ConfigMapKeySelectorToCore(nil)).To(BeNil()) + }) + + It("correctly map non-nil values", func() { + selector := ConfigMapKeySelector{ + LocalObjectReference: LocalObjectReference{ + Name: "thisName", + }, + Key: "thisKey", + } + + Expect(selector.Name).To(Equal("thisName")) + Expect(selector.Key).To(Equal("thisKey")) + }) +}) + +var _ = Describe("Properly builds ListStatusPods", func() { + healthyPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "healthyPod", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + activePod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "activePod", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionFalse, + }, + }, + }, + } + failedPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failedPod", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionFalse, + }, + }, + }, + } + + now := metav1.Now() + terminatingPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "terminatingPod", + DeletionTimestamp: &now, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + + It("Detects healthy pods", func() { + podList := []corev1.Pod{healthyPod, healthyPod} + expectedStatus := map[PodStatus][]string{ + PodHealthy: {"healthyPod", "healthyPod"}, + } + podStatus := ListStatusPods(podList) + Expect(podStatus).To(BeEquivalentTo(expectedStatus)) + }) + + It("Detects active pods", func() { + podList := []corev1.Pod{healthyPod, activePod} + expectedStatus := map[PodStatus][]string{ + PodHealthy: {"healthyPod"}, + PodReplicating: {"activePod"}, + } + podStatus := ListStatusPods(podList) + Expect(podStatus).To(BeEquivalentTo(expectedStatus)) + }) + + It("Detects failed pods", func() { + podList := []corev1.Pod{healthyPod, activePod, failedPod} + expectedStatus := map[PodStatus][]string{ + PodHealthy: {"healthyPod"}, + PodReplicating: {"activePod"}, + PodFailed: {"failedPod"}, + } + podStatus := ListStatusPods(podList) + Expect(podStatus).To(BeEquivalentTo(expectedStatus)) + }) + + It("Excludes terminating pods", func() { + podList := []corev1.Pod{healthyPod, activePod, failedPod, terminatingPod} + expectedStatus := map[PodStatus][]string{ + PodHealthy: {"healthyPod"}, + PodReplicating: {"activePod"}, + PodFailed: {"failedPod"}, + } + podStatus := ListStatusPods(podList) + Expect(podStatus).To(BeEquivalentTo(expectedStatus)) + }) +}) diff --git a/api/v1/base_types.go b/api/v1/base_types.go index 325845146b..376b518c0f 100644 --- a/api/v1/base_types.go +++ b/api/v1/base_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG 
Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,40 +13,41 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 import ( - corev1 "k8s.io/api/core/v1" + machineryapi "github.com/cloudnative-pg/machinery/pkg/api" +) + +// PodStatus represent the possible status of pods +type PodStatus string + +const ( + // PodHealthy means that a Pod is active and ready + PodHealthy = "healthy" + + // PodReplicating means that a Pod is still not ready but still active + PodReplicating = "replicating" + + // PodFailed means that a Pod will not be scheduled again (deleted or evicted) + PodFailed = "failed" ) -// SecretKeySelectorToCore transforms a SecretKeySelector structure to the -// analogue one in the corev1 namespace -func SecretKeySelectorToCore(selector *SecretKeySelector) *corev1.SecretKeySelector { - if selector == nil { - return nil - } - - return &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: selector.LocalObjectReference.Name, - }, - Key: selector.Key, - } -} - -// ConfigMapKeySelectorToCore transforms a ConfigMapKeySelector structure to the analogue -// one in the corev1 namespace -func ConfigMapKeySelectorToCore(selector *ConfigMapKeySelector) *corev1.ConfigMapKeySelector { - if selector == nil { - return nil - } - - return &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: selector.Name, - }, - Key: selector.Key, - } -} +// LocalObjectReference contains enough information to let you locate a +// local object with a known type inside the same namespace +// +kubebuilder:object:generate:=false +type LocalObjectReference = machineryapi.LocalObjectReference + +// SecretKeySelector contains enough information to let you locate +// the key of a Secret +// +kubebuilder:object:generate:=false +type SecretKeySelector = machineryapi.SecretKeySelector + +// ConfigMapKeySelector contains enough information to let you locate +// the key of a ConfigMap +// +kubebuilder:object:generate:=false +type ConfigMapKeySelector = machineryapi.ConfigMapKeySelector diff --git a/api/v1/base_types_test.go b/api/v1/base_types_test.go deleted file mode 100644 index 706f6c7c1f..0000000000 --- a/api/v1/base_types_test.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Base type mappings for secrets", func() { - It("correctly map nil values", func() { - Expect(SecretKeySelectorToCore(nil)).To(BeNil()) - }) - - It("correctly map non-nil values", func() { - selector := SecretKeySelector{ - LocalObjectReference: LocalObjectReference{ - Name: "thisName", - }, - Key: "thisKey", - } - - Expect(selector.Name).To(Equal("thisName")) - Expect(selector.Key).To(Equal("thisKey")) - }) -}) - -var _ = Describe("Base type mappings for configmaps", func() { - It("correctly map nil values", func() { - Expect(ConfigMapKeySelectorToCore(nil)).To(BeNil()) - }) - - It("correctly map non-nil values", func() { - selector := ConfigMapKeySelector{ - LocalObjectReference: LocalObjectReference{ - Name: "thisName", - }, - Key: "thisKey", - } - - Expect(selector.Name).To(Equal("thisName")) - Expect(selector.Key).To(Equal("thisKey")) - }) -}) diff --git a/api/v1/cluster_conditions.go b/api/v1/cluster_conditions.go new file mode 100644 index 0000000000..597b367ebf --- /dev/null +++ b/api/v1/cluster_conditions.go @@ -0,0 +1,54 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// A Condition that can be used to communicate the Backup progress +var ( + // BackupSucceededCondition is added to a backup + // when it was completed correctly + BackupSucceededCondition = metav1.Condition{ + Type: string(ConditionBackup), + Status: metav1.ConditionTrue, + Reason: string(ConditionReasonLastBackupSucceeded), + Message: "Backup was successful", + } + + // BackupStartingCondition is added to a backup + // when it started + BackupStartingCondition = metav1.Condition{ + Type: string(ConditionBackup), + Status: metav1.ConditionFalse, + Reason: string(ConditionBackupStarted), + Message: "New Backup starting up", + } + + // BuildClusterBackupFailedCondition builds + // ConditionReasonLastBackupFailed condition + BuildClusterBackupFailedCondition = func(err error) metav1.Condition { + return metav1.Condition{ + Type: string(ConditionBackup), + Status: metav1.ConditionFalse, + Reason: string(ConditionReasonLastBackupFailed), + Message: err.Error(), + } + } +) diff --git a/api/v1/cluster_defaults.go b/api/v1/cluster_defaults.go new file mode 100644 index 0000000000..707b3f4287 --- /dev/null +++ b/api/v1/cluster_defaults.go @@ -0,0 +1,362 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + "k8s.io/utils/ptr" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +const ( + // DefaultMonitoringKey is the key that should be used in the default metrics configmap to store the queries + DefaultMonitoringKey = "queries" + // DefaultMonitoringConfigMapName is the name of the target configmap with the default monitoring queries, + // if configured + DefaultMonitoringConfigMapName = "cnpg-default-monitoring" + // DefaultMonitoringSecretName is the name of the target secret with the default monitoring queries, + // if configured + DefaultMonitoringSecretName = DefaultMonitoringConfigMapName + // DefaultApplicationDatabaseName is the name of application database if not specified + DefaultApplicationDatabaseName = "app" + // DefaultApplicationUserName is the name of application database owner if not specified + DefaultApplicationUserName = DefaultApplicationDatabaseName +) + +// Default apply the defaults to undefined values in a Cluster preserving the user settings +func (r *Cluster) Default() { + r.setDefaults(true) +} + +// SetDefaults apply the defaults to undefined values in a Cluster +func (r *Cluster) SetDefaults() { + r.setDefaults(false) +} + +func (r *Cluster) setDefaults(preserveUserSettings bool) { + // Defaulting the image name if not specified + if r.Spec.ImageName == "" && r.Spec.ImageCatalogRef == nil { + r.Spec.ImageName = configuration.Current.PostgresImageName + } + + // Defaulting the bootstrap method if not specified + if r.Spec.Bootstrap == nil { + r.Spec.Bootstrap = &BootstrapConfiguration{} + } + + // Defaulting initDB if no other bootstrap method was passed + switch { + case r.Spec.Bootstrap.Recovery != nil: + r.defaultRecovery() + case r.Spec.Bootstrap.PgBaseBackup != nil: + r.defaultPgBaseBackup() + default: + r.defaultInitDB() + } + + // Defaulting the pod anti-affinity type if podAntiAffinity + if (r.Spec.Affinity.EnablePodAntiAffinity == nil || *r.Spec.Affinity.EnablePodAntiAffinity) && + r.Spec.Affinity.PodAntiAffinityType == "" { + r.Spec.Affinity.PodAntiAffinityType = PodAntiAffinityTypePreferred + } + + if r.Spec.Backup != nil && r.Spec.Backup.Target == "" { + r.Spec.Backup.Target = DefaultBackupTarget + } + + psqlVersion, err := r.GetPostgresqlMajorVersion() + if err == nil { + // The validation error will be already raised by the + // validateImageName function + info := postgres.ConfigurationInfo{ + Settings: postgres.CnpgConfigurationSettings, + MajorVersion: psqlVersion, + UserSettings: r.Spec.PostgresConfiguration.Parameters, + IsReplicaCluster: r.IsReplica(), + PreserveFixedSettingsFromUser: preserveUserSettings, + IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta), + IsAlterSystemEnabled: r.Spec.PostgresConfiguration.EnableAlterSystem, + } + sanitizedParameters := 
+		r.Spec.PostgresConfiguration.Parameters = sanitizedParameters
+	}
+
+	if r.Spec.LogLevel == "" {
+		r.Spec.LogLevel = log.InfoLevelString
+	}
+
+	// We inject the default monitoring queries if the MonitoringQueriesConfigmap
+	// parameter is not empty and the default queries are not disabled in the cluster CRD
+	if !r.Spec.Monitoring.AreDefaultQueriesDisabled() {
+		r.defaultMonitoringQueries(configuration.Current)
+	}
+
+	// If the ReplicationSlots or HighAvailability stanzas are nil, we create them and enable slots
+	if r.Spec.ReplicationSlots == nil {
+		r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{}
+	}
+	if r.Spec.ReplicationSlots.HighAvailability == nil {
+		r.Spec.ReplicationSlots.HighAvailability = &ReplicationSlotsHAConfiguration{
+			Enabled:    ptr.To(true),
+			SlotPrefix: "_cnpg_",
+		}
+	}
+	if r.Spec.ReplicationSlots.SynchronizeReplicas == nil {
+		r.Spec.ReplicationSlots.SynchronizeReplicas = &SynchronizeReplicasConfiguration{
+			Enabled: ptr.To(true),
+		}
+	}
+
+	if len(r.Spec.Tablespaces) > 0 {
+		r.defaultTablespaces()
+	}
+
+	if r.Spec.PostgresConfiguration.Synchronous != nil &&
+		r.Spec.PostgresConfiguration.Synchronous.DataDurability == "" {
+		r.Spec.PostgresConfiguration.Synchronous.DataDurability = DataDurabilityLevelRequired
+	}
+
+	r.setDefaultPlugins(configuration.Current)
+	r.setProbes()
+}
+
+func (r *Cluster) setDefaultPlugins(config *configuration.Data) {
+	// Add the list of pre-defined plugins
+	foundPlugins := stringset.New()
+	for _, plugin := range r.Spec.Plugins {
+		foundPlugins.Put(plugin.Name)
+	}
+
+	for _, pluginName := range config.GetIncludePlugins() {
+		if !foundPlugins.Has(pluginName) {
+			r.Spec.Plugins = append(r.Spec.Plugins, PluginConfiguration{
+				Name:    pluginName,
+				Enabled: ptr.To(true),
+			})
+		}
+	}
+}
+
+// defaultTablespaces adds the tablespace owner where the
+// user didn't specify it
+func (r *Cluster) defaultTablespaces() {
+	defaultOwner := r.GetApplicationDatabaseOwner()
+	if len(defaultOwner) == 0 {
+		defaultOwner = "postgres"
+	}
+
+	for name, tablespaceConfiguration := range r.Spec.Tablespaces {
+		if len(tablespaceConfiguration.Owner.Name) == 0 {
+			tablespaceConfiguration.Owner.Name = defaultOwner
+		}
+		r.Spec.Tablespaces[name] = tablespaceConfiguration
+	}
+}
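+
+// A minimal sketch of the resulting behavior (illustrative only; the
+// tablespacesWithoutOwner value is hypothetical):
+//
+//	cluster := Cluster{Spec: ClusterSpec{
+//		Bootstrap: &BootstrapConfiguration{
+//			InitDB: &BootstrapInitDB{Database: "app", Owner: "app"},
+//		},
+//		Tablespaces: tablespacesWithoutOwner,
+//	}}
+//	cluster.Default() // every tablespace without an owner is now owned by "app"
+//
+// When no application database owner can be derived, the owner falls
+// back to the "postgres" user.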
+
+// defaultMonitoringQueries adds the default monitoring queries configMap
+// if not already present in CustomQueriesConfigMap
+func (r *Cluster) defaultMonitoringQueries(config *configuration.Data) {
+	if r.Spec.Monitoring == nil {
+		r.Spec.Monitoring = &MonitoringConfiguration{}
+	}
+
+	if config.MonitoringQueriesConfigmap != "" {
+		var defaultConfigMapQueriesAlreadyPresent bool
+		// We check if the default queries are already inserted in the monitoring configuration
+		for _, monitoringConfigMap := range r.Spec.Monitoring.CustomQueriesConfigMap {
+			if monitoringConfigMap.Name == DefaultMonitoringConfigMapName {
+				defaultConfigMapQueriesAlreadyPresent = true
+				break
+			}
+		}
+
+		// If the default queries are already present, there is no need to re-add them.
+		// Note that, depending on the order, the default configMap could override
+		// existing user queries: this is accepted behavior, because the user
+		// willingly defined the order of the array
+		if !defaultConfigMapQueriesAlreadyPresent {
+			r.Spec.Monitoring.CustomQueriesConfigMap = append([]ConfigMapKeySelector{
+				{
+					LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
+					Key:                  DefaultMonitoringKey,
+				},
+			}, r.Spec.Monitoring.CustomQueriesConfigMap...)
+		}
+	}
+
+	if config.MonitoringQueriesSecret != "" {
+		var defaultSecretQueriesAlreadyPresent bool
+		// We check if the default queries are already inserted in the monitoring configuration
+		for _, monitoringSecret := range r.Spec.Monitoring.CustomQueriesSecret {
+			if monitoringSecret.Name == DefaultMonitoringSecretName {
+				defaultSecretQueriesAlreadyPresent = true
+				break
+			}
+		}
+
+		if !defaultSecretQueriesAlreadyPresent {
+			r.Spec.Monitoring.CustomQueriesSecret = append([]SecretKeySelector{
+				{
+					LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
+					Key:                  DefaultMonitoringKey,
+				},
+			}, r.Spec.Monitoring.CustomQueriesSecret...)
+		}
+	}
+}
+
+// defaultInitDB enriches the initDB with defaults if not all the required arguments were passed
+func (r *Cluster) defaultInitDB() {
+	if r.Spec.Bootstrap.InitDB == nil {
+		r.Spec.Bootstrap.InitDB = &BootstrapInitDB{
+			Database: DefaultApplicationDatabaseName,
+			Owner:    DefaultApplicationUserName,
+		}
+	}
+
+	if r.Spec.Bootstrap.InitDB.Database == "" {
+		// Set the default only if not executing a monolithic import
+		if r.Spec.Bootstrap.InitDB.Import == nil ||
+			r.Spec.Bootstrap.InitDB.Import.Type != MonolithSnapshotType {
+			r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName
+		}
+	}
+	if r.Spec.Bootstrap.InitDB.Owner == "" {
+		r.Spec.Bootstrap.InitDB.Owner = r.Spec.Bootstrap.InitDB.Database
+	}
+	if r.Spec.Bootstrap.InitDB.Encoding == "" {
+		r.Spec.Bootstrap.InitDB.Encoding = "UTF8"
+	}
+	if r.Spec.Bootstrap.InitDB.LocaleCollate == "" {
+		r.Spec.Bootstrap.InitDB.LocaleCollate = "C"
+	}
+	if r.Spec.Bootstrap.InitDB.LocaleCType == "" {
+		r.Spec.Bootstrap.InitDB.LocaleCType = "C"
+	}
+}
+
+// defaultRecovery enriches the recovery with defaults if not all the required arguments were passed
+func (r *Cluster) defaultRecovery() {
+	if r.Spec.Bootstrap.Recovery.Database == "" {
+		r.Spec.Bootstrap.Recovery.Database = DefaultApplicationDatabaseName
+	}
+	if r.Spec.Bootstrap.Recovery.Owner == "" {
+		r.Spec.Bootstrap.Recovery.Owner = r.Spec.Bootstrap.Recovery.Database
+	}
+}
+
+// defaultPgBaseBackup enriches the pg_basebackup with defaults if not all the required arguments were passed
+func (r *Cluster) defaultPgBaseBackup() {
+	if r.Spec.Bootstrap.PgBaseBackup.Database == "" {
+		r.Spec.Bootstrap.PgBaseBackup.Database = DefaultApplicationDatabaseName
+	}
+	if r.Spec.Bootstrap.PgBaseBackup.Owner == "" {
+		r.Spec.Bootstrap.PgBaseBackup.Owner = r.Spec.Bootstrap.PgBaseBackup.Database
+	}
+}
+
+const (
+	// defaultRequestTimeout is the default value of the request timeout
+	defaultRequestTimeout = 1000
+
+	// defaultConnectionTimeout is the default value of the connection timeout
+	defaultConnectionTimeout = 1000
+)
+
+func (r *Cluster) setProbes() {
+	if r.Spec.Probes == nil {
+		r.Spec.Probes = &ProbesConfiguration{}
+	}
+
+	if r.Spec.Probes.Liveness == nil {
+		r.Spec.Probes.Liveness = &LivenessProbe{}
+	}
+
+	// We don't override the isolation check if it is already set
+	if r.Spec.Probes.Liveness.IsolationCheck != nil {
+		return
+	}
+
+	// STEP 1: check if the alpha annotation is present; if so, convert it to the spec
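+	// The annotation value is a JSON document such as
+	//	{"enabled": true, "requestTimeout": 300, "connectionTimeout": 600}
+	// (illustrative values), stored under the utils.LivenessPingerAnnotationName key.
+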
r.tryConvertAlphaLivenessPinger() + + if r.Spec.Probes.Liveness.IsolationCheck != nil { + return + } + + // STEP 2: set defaults. + r.Spec.Probes.Liveness.IsolationCheck = &IsolationCheckConfiguration{ + Enabled: ptr.To(true), + RequestTimeout: defaultRequestTimeout, + ConnectionTimeout: defaultConnectionTimeout, + } +} + +func (r *Cluster) tryConvertAlphaLivenessPinger() { + if _, ok := r.Annotations[utils.LivenessPingerAnnotationName]; !ok { + return + } + v, err := NewLivenessPingerConfigFromAnnotations(r.Annotations) + if err != nil || v == nil { + // the error will be raised by the validation webhook + return + } + + r.Spec.Probes.Liveness.IsolationCheck = &IsolationCheckConfiguration{ + Enabled: v.Enabled, + RequestTimeout: v.RequestTimeout, + ConnectionTimeout: v.ConnectionTimeout, + } +} + +// NewLivenessPingerConfigFromAnnotations creates a new pinger configuration from the annotations +// in the cluster definition +func NewLivenessPingerConfigFromAnnotations( + annotations map[string]string, +) (*IsolationCheckConfiguration, error) { + v, ok := annotations[utils.LivenessPingerAnnotationName] + if !ok { + return nil, nil + } + + var cfg IsolationCheckConfiguration + if err := json.Unmarshal([]byte(v), &cfg); err != nil { + return nil, fmt.Errorf("while unmarshalling pinger config: %w", err) + } + + if cfg.Enabled == nil { + return nil, fmt.Errorf("pinger config is missing the enabled field") + } + + if cfg.RequestTimeout == 0 { + cfg.RequestTimeout = defaultRequestTimeout + } + if cfg.ConnectionTimeout == 0 { + cfg.ConnectionTimeout = defaultConnectionTimeout + } + + return &cfg, nil +} diff --git a/api/v1/cluster_defaults_test.go b/api/v1/cluster_defaults_test.go new file mode 100644 index 0000000000..608e994003 --- /dev/null +++ b/api/v1/cluster_defaults_test.go @@ -0,0 +1,517 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("cluster default configuration", func() { + It("defaults to creating an application database", func() { + cluster := Cluster{} + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app")) + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app")) + }) + + It("defaults the owner user with the database name", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + InitDB: &BootstrapInitDB{ + Database: "appdb", + }, + }, + }, + } + + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("appdb")) + }) + + It("defaults to create an application database if recovery is used", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + Recovery: &BootstrapRecovery{}, + }, + }, + } + cluster.Default() + Expect(cluster.ShouldRecoveryCreateApplicationDatabase()).Should(BeTrue()) + Expect(cluster.Spec.Bootstrap.Recovery.Database).ShouldNot(BeEmpty()) + Expect(cluster.Spec.Bootstrap.Recovery.Owner).ShouldNot(BeEmpty()) + Expect(cluster.Spec.Bootstrap.Recovery.Secret).Should(BeNil()) + }) + + It("defaults the owner user with the database name for recovery", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + Recovery: &BootstrapRecovery{ + Database: "appdb", + }, + }, + }, + } + + cluster.Default() + Expect(cluster.Spec.Bootstrap.Recovery.Owner).To(Equal("appdb")) + }) + + It("defaults to create an application database if pg_basebackup is used", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + PgBaseBackup: &BootstrapPgBaseBackup{}, + }, + }, + } + cluster.Default() + Expect(cluster.ShouldPgBaseBackupCreateApplicationDatabase()).Should(BeTrue()) + Expect(cluster.Spec.Bootstrap.PgBaseBackup.Database).ShouldNot(BeEmpty()) + Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).ShouldNot(BeEmpty()) + Expect(cluster.Spec.Bootstrap.PgBaseBackup.Secret).Should(BeNil()) + }) + + It("defaults the owner user with the database name for pg_basebackup", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + PgBaseBackup: &BootstrapPgBaseBackup{ + Database: "appdb", + }, + }, + }, + } + + cluster.Default() + Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).To(Equal("appdb")) + }) + + It("defaults the PostgreSQL configuration with parameters from the operator", func() { + cluster := Cluster{} + cluster.Default() + Expect(cluster.Spec.PostgresConfiguration.Parameters).ToNot(BeEmpty()) + }) + + It("defaults the anti-affinity", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Affinity: AffinityConfiguration{}, + }, + } + cluster.Default() + Expect(cluster.Spec.Affinity.PodAntiAffinityType).To(BeEquivalentTo(PodAntiAffinityTypePreferred)) + Expect(cluster.Spec.Affinity.EnablePodAntiAffinity).To(BeNil()) + }) + + It("should fill the image name if isn't already set", func() { + cluster := Cluster{} + cluster.Default() + Expect(cluster.Spec.ImageName).To(Equal(configuration.Current.PostgresImageName)) + }) + + It("shouldn't set the image name if already present", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + ImageName: "test:13", + }, + } + cluster.Default() + Expect(cluster.Spec.ImageName).To(Equal("test:13")) + }) + + It("should setup the application database name", func() { + cluster := Cluster{} + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app")) + 
Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app")) + }) + + It("should set the owner name as the database name", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + InitDB: &BootstrapInitDB{ + Database: "test", + }, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("test")) + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("test")) + }) + + It("should not overwrite application database and owner settings", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + InitDB: &BootstrapInitDB{ + Database: "testdb", + Owner: "testuser", + }, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("testdb")) + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("testuser")) + }) +}) + +var _ = Describe("Default monitoring queries", func() { + It("correctly set the default monitoring queries configmap and secret when none is already specified", func() { + cluster := &Cluster{} + cluster.defaultMonitoringQueries(&configuration.Data{ + MonitoringQueriesSecret: "test-secret", + MonitoringQueriesConfigmap: "test-configmap", + }) + Expect(cluster.Spec.Monitoring).NotTo(BeNil()) + Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) + Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap). + To(ContainElement(ConfigMapKeySelector{ + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: DefaultMonitoringKey, + })) + Expect(cluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) + Expect(cluster.Spec.Monitoring.CustomQueriesSecret). + To(ContainElement(SecretKeySelector{ + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, + Key: DefaultMonitoringKey, + })) + }) + testCluster := &Cluster{Spec: ClusterSpec{Monitoring: &MonitoringConfiguration{ + CustomQueriesConfigMap: []ConfigMapKeySelector{ + { + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: "test2", + }, + }, + CustomQueriesSecret: []SecretKeySelector{ + { + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: "test3", + }, + }, + }}} + It("correctly set the default monitoring queries configmap when other metrics are already specified", func() { + modifiedCluster := testCluster.DeepCopy() + modifiedCluster.defaultMonitoringQueries(&configuration.Data{ + MonitoringQueriesConfigmap: "test-configmap", + }) + + Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). + To(ContainElement(ConfigMapKeySelector{ + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: "test2", + })) + + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). + To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesSecret)) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). 
+ To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesConfigMap)) + }) + It("correctly set the default monitoring queries secret when other metrics are already specified", func() { + modifiedCluster := testCluster.DeepCopy() + modifiedCluster.defaultMonitoringQueries(&configuration.Data{ + MonitoringQueriesSecret: "test-secret", + }) + + Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). + To(ContainElement(SecretKeySelector{ + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, + Key: "test3", + })) + + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). + To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesConfigMap)) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). + To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesSecret)) + }) +}) + +var _ = Describe("setDefaultPlugins", func() { + It("adds pre-defined plugins if not already present", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Plugins: []PluginConfiguration{ + {Name: "existing-plugin", Enabled: ptr.To(true)}, + }, + }, + } + config := &configuration.Data{ + IncludePlugins: "predefined-plugin1,predefined-plugin2", + } + + cluster.setDefaultPlugins(config) + + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "existing-plugin", Enabled: ptr.To(true)})) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)})) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)})) + }) + + It("does not add pre-defined plugins if already present", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Plugins: []PluginConfiguration{ + {Name: "predefined-plugin1", Enabled: ptr.To(false)}, + }, + }, + } + config := &configuration.Data{ + IncludePlugins: "predefined-plugin1,predefined-plugin2", + } + + cluster.setDefaultPlugins(config) + + Expect(cluster.Spec.Plugins).To(HaveLen(2)) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(false)})) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)})) + }) + + It("handles empty plugin list gracefully", func() { + cluster := &Cluster{} + config := &configuration.Data{ + IncludePlugins: "predefined-plugin1", + } + + cluster.setDefaultPlugins(config) + + Expect(cluster.Spec.Plugins).To(HaveLen(1)) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)})) + }) +}) + +var _ = Describe("default dataDurability", func() { + It("should default dataDurability to 'required' when synchronous is present", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + PostgresConfiguration: PostgresConfiguration{ + Synchronous: &SynchronousReplicaConfiguration{}, + }, + }, + } + cluster.SetDefaults() + Expect(cluster.Spec.PostgresConfiguration.Synchronous).ToNot(BeNil()) + Expect(cluster.Spec.PostgresConfiguration.Synchronous.DataDurability).To(Equal(DataDurabilityLevelRequired)) + }) + + It("should not touch synchronous if nil", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + PostgresConfiguration: PostgresConfiguration{ + Synchronous: nil, + }, + }, + } + 
cluster.SetDefaults() + Expect(cluster.Spec.PostgresConfiguration.Synchronous).To(BeNil()) + }) + + It("should not change the dataDurability when set", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + PostgresConfiguration: PostgresConfiguration{ + Synchronous: &SynchronousReplicaConfiguration{ + DataDurability: DataDurabilityLevelPreferred, + }, + }, + }, + } + cluster.SetDefaults() + Expect(cluster.Spec.PostgresConfiguration.Synchronous).ToNot(BeNil()) + Expect(cluster.Spec.PostgresConfiguration.Synchronous.DataDurability).To(Equal(DataDurabilityLevelPreferred)) + }) +}) + +var _ = Describe("NewLivenessPingerConfigFromAnnotations", func() { + It("returns a nil configuration when annotation is not present", func() { + annotations := map[string]string{} + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(BeNil()) + }) + + It("returns an error when annotation contains invalid JSON", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: "{invalid_json", + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).To(HaveOccurred()) + Expect(config).To(BeNil()) + }) + + It("applies default values when timeouts are not specified", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": true}`, + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).ToNot(BeNil()) + Expect(config.Enabled).To(HaveValue(BeTrue())) + Expect(config.RequestTimeout).To(Equal(1000)) + Expect(config.ConnectionTimeout).To(Equal(1000)) + }) + + It("preserves values when all fields are specified", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": true, "requestTimeout": 300, "connectionTimeout": 600}`, + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).ToNot(BeNil()) + Expect(config.Enabled).To(HaveValue(BeTrue())) + Expect(config.RequestTimeout).To(Equal(300)) + Expect(config.ConnectionTimeout).To(Equal(600)) + }) + + It("correctly sets enabled to false when specified", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": false, "requestTimeout": 300, "connectionTimeout": 600}`, + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).ToNot(BeNil()) + Expect(config.Enabled).To(HaveValue(BeFalse())) + Expect(config.RequestTimeout).To(Equal(300)) + Expect(config.ConnectionTimeout).To(Equal(600)) + }) + + It("correctly handles zero values for timeouts", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": true, "requestTimeout": 0, "connectionTimeout": 0}`, + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).ToNot(BeNil()) + Expect(config.RequestTimeout).To(Equal(1000)) + Expect(config.ConnectionTimeout).To(Equal(1000)) + }) +}) + +var _ = Describe("probe defaults", func() { + It("should set isolationCheck probe to true by default when no probes are specified", func() { + cluster := &Cluster{} + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeTrue())) + 
Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(1000)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(1000)) + }) + + It("should not override isolationCheck probe if already set", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Probes: &ProbesConfiguration{ + Liveness: &LivenessProbe{ + IsolationCheck: &IsolationCheckConfiguration{ + Enabled: ptr.To(false), + RequestTimeout: 300, + ConnectionTimeout: 600, + }, + }, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeFalse())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(300)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(600)) + }) + + It("should set isolationCheck probe when it is not set but liveness probe is present", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Probes: &ProbesConfiguration{ + Liveness: &LivenessProbe{}, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeTrue())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(1000)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(1000)) + }) + + It("should convert the existing annotations if set to true", func() { + cluster := &Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": true, "requestTimeout": 300, "connectionTimeout": 600}`, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeTrue())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(300)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(600)) + }) + + It("should convert the existing annotations if set to false", func() { + cluster := &Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": false, "requestTimeout": 300, "connectionTimeout": 600}`, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeFalse())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(300)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(600)) + }) +}) diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go new file mode 100644 index 0000000000..3ffecd5846 --- /dev/null +++ b/api/v1/cluster_funcs.go @@ -0,0 +1,1579 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "context" + "fmt" + "regexp" + "slices" + "strconv" + "strings" + "time" + + "github.com/cloudnative-pg/cnpg-i/pkg/identity" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" + "github.com/cloudnative-pg/machinery/pkg/stringset" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/system" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" +) + +// GetOnline tells whether this volume snapshot configuration allows +// online backups +func (configuration *VolumeSnapshotConfiguration) GetOnline() bool { + if configuration.Online == nil { + return true + } + + return *configuration.Online +} + +// GetWaitForArchive tells whether to wait for archive or not +func (o OnlineConfiguration) GetWaitForArchive() bool { + if o.WaitForArchive == nil { + return true + } + + return *o.WaitForArchive +} + +// GetImmediateCheckpoint tells whether to execute an immediate checkpoint +func (o OnlineConfiguration) GetImmediateCheckpoint() bool { + if o.ImmediateCheckpoint == nil { + return false + } + + return *o.ImmediateCheckpoint +} + +// GetPluginConfigurationEnabledPluginNames gets the name of the plugins that are involved +// in the reconciliation of this cluster +func GetPluginConfigurationEnabledPluginNames(pluginList []PluginConfiguration) (result []string) { + pluginNames := make([]string, 0, len(pluginList)) + for _, pluginDeclaration := range pluginList { + if pluginDeclaration.IsEnabled() { + pluginNames = append(pluginNames, pluginDeclaration.Name) + } + } + return pluginNames +} + +// GetInstanceEnabledPluginNames gets the name of the plugins that are available to the instance container +func (cluster *Cluster) GetInstanceEnabledPluginNames() (result []string) { + var instance []string + for _, pluginStatus := range cluster.Status.PluginStatus { + if slices.Contains(pluginStatus.Capabilities, + identity.PluginCapability_Service_TYPE_INSTANCE_SIDECAR_INJECTION.String()) { + instance = append(instance, pluginStatus.Name) + } + } + + enabled := GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) + + var instanceEnabled []string + for _, pluginName := range instance { + if slices.Contains(enabled, pluginName) { + instanceEnabled = append(instanceEnabled, pluginName) + } + } + + return instanceEnabled +} + +// GetJobEnabledPluginNames gets the name of the plugins that are available to the job container +func (cluster *Cluster) GetJobEnabledPluginNames() (result []string) { + var instance []string + for _, pluginStatus := range cluster.Status.PluginStatus { + if slices.Contains(pluginStatus.Capabilities, + identity.PluginCapability_Service_TYPE_INSTANCE_JOB_SIDECAR_INJECTION.String()) { + instance = append(instance, pluginStatus.Name) + } + } + + enabled := GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) + + var instanceEnabled []string + for _, pluginName := range instance { + if slices.Contains(enabled, pluginName) { + instanceEnabled 
= append(instanceEnabled, pluginName)
+		}
+	}
+
+	return instanceEnabled
+}
+
+// GetExternalClustersEnabledPluginNames gets the names of the plugins that are
+// involved in the reconciliation of this external cluster list. This
+// list is usually composed of the plugins that need to be active to
+// recover data from the external clusters.
+func GetExternalClustersEnabledPluginNames(externalClusters []ExternalCluster) (result []string) {
+	pluginNames := make([]string, 0, len(externalClusters))
+	for _, externalCluster := range externalClusters {
+		if externalCluster.PluginConfiguration != nil {
+			pluginNames = append(pluginNames, externalCluster.PluginConfiguration.Name)
+		}
+	}
+	return pluginNames
+}
+
+// GetShmLimit gets the `/dev/shm` memory size limit
+func (e *EphemeralVolumesSizeLimitConfiguration) GetShmLimit() *resource.Quantity {
+	if e == nil {
+		return nil
+	}
+
+	return e.Shm
+}
+
+// GetTemporaryDataLimit gets the temporary storage size limit
+func (e *EphemeralVolumesSizeLimitConfiguration) GetTemporaryDataLimit() *resource.Quantity {
+	if e == nil {
+		return nil
+	}
+
+	return e.TemporaryData
+}
+
+// MergeMetadata adds the passed custom annotations and labels to the service account.
+func (st *ServiceAccountTemplate) MergeMetadata(sa *corev1.ServiceAccount) {
+	if st == nil {
+		return
+	}
+	if sa.Labels == nil {
+		sa.Labels = map[string]string{}
+	}
+	if sa.Annotations == nil {
+		sa.Annotations = map[string]string{}
+	}
+
+	utils.MergeMap(sa.Labels, st.Metadata.Labels)
+	utils.MergeMap(sa.Annotations, st.Metadata.Annotations)
+}
+
+// MatchesTopology checks if the two topologies have
+// the same label values (labels are specified in SyncReplicaElectionConstraints.NodeLabelsAntiAffinity)
+func (topologyLabels PodTopologyLabels) MatchesTopology(instanceTopology PodTopologyLabels) bool {
+	for mainLabelName, mainLabelValue := range topologyLabels {
+		if mainLabelValue != instanceTopology[mainLabelName] {
+			return false
+		}
+	}
+	return true
+}
+
+// GetAvailableArchitecture returns an AvailableArchitecture given its name. It returns nil if it's not found.
+func (status *ClusterStatus) GetAvailableArchitecture(archName string) *AvailableArchitecture { + for _, architecture := range status.AvailableArchitectures { + if architecture.GoArch == archName { + return &architecture + } + } + return nil +} + +type regexErrors struct { + errs []error +} + +func (r regexErrors) Error() string { + if len(r.errs) == 0 { + return "" + } + var sb strings.Builder + sb.WriteString("failed to compile regex patterns: ") + for _, err := range r.errs { + sb.WriteString(err.Error()) + sb.WriteString("; ") + } + return sb.String() +} + +func (r *SynchronizeReplicasConfiguration) compileRegex() ([]regexp.Regexp, error) { + if r == nil { + return nil, nil + } + + var ( + compiledPatterns = make([]regexp.Regexp, len(r.ExcludePatterns)) + compileErrors []error + ) + + for idx, pattern := range r.ExcludePatterns { + re, err := regexp.Compile(pattern) + if err != nil { + compileErrors = append(compileErrors, err) + continue + } + compiledPatterns[idx] = *re + } + + if len(compileErrors) > 0 { + return nil, regexErrors{errs: compileErrors} + } + + return compiledPatterns, nil +} + +// GetEnabled returns false if synchronized replication slots are disabled, defaults to true +func (r *SynchronizeReplicasConfiguration) GetEnabled() bool { + if r != nil && r.Enabled != nil { + return *r.Enabled + } + return true +} + +// ValidateRegex returns all the errors that happened during the regex compilation +func (r *SynchronizeReplicasConfiguration) ValidateRegex() error { + _, err := r.compileRegex() + return err +} + +// IsExcludedByUser returns if a replication slot should not be reconciled on the replicas +func (r *SynchronizeReplicasConfiguration) IsExcludedByUser(slotName string) (bool, error) { + if r == nil { + return false, nil + } + + compiledPatterns, err := r.compileRegex() + // this is an unexpected issue, validation should happen at webhook level + if err != nil { + return false, err + } + + for _, re := range compiledPatterns { + if re.MatchString(slotName) { + return true, nil + } + } + + return false, nil +} + +// GetEnabled returns false if replication slots are disabled, default is true +func (r *ReplicationSlotsConfiguration) GetEnabled() bool { + return r.SynchronizeReplicas.GetEnabled() || r.HighAvailability.GetEnabled() +} + +// GetUpdateInterval returns the update interval, defaulting to DefaultReplicationSlotsUpdateInterval if empty +func (r *ReplicationSlotsConfiguration) GetUpdateInterval() time.Duration { + if r == nil || r.UpdateInterval <= 0 { + return DefaultReplicationSlotsUpdateInterval + } + return time.Duration(r.UpdateInterval) * time.Second +} + +// GetSlotPrefix returns the HA slot prefix, defaulting to DefaultReplicationSlotsHASlotPrefix if empty +func (r *ReplicationSlotsHAConfiguration) GetSlotPrefix() string { + if r == nil || r.SlotPrefix == "" { + return DefaultReplicationSlotsHASlotPrefix + } + return r.SlotPrefix +} + +// GetSlotNameFromInstanceName returns the slot name, given the instance name. 
+// It returns an empty string if High Availability Replication Slots are disabled
+func (r *ReplicationSlotsHAConfiguration) GetSlotNameFromInstanceName(instanceName string) string {
+	if r == nil || !r.GetEnabled() {
+		return ""
+	}
+
+	slotName := fmt.Sprintf(
+		"%s%s",
+		r.GetSlotPrefix(),
+		instanceName,
+	)
+	sanitizedName := slotNameNegativeRegex.ReplaceAllString(strings.ToLower(slotName), "_")
+
+	return sanitizedName
+}
+
+// GetEnabled returns false if replication slots are disabled, default is true
+func (r *ReplicationSlotsHAConfiguration) GetEnabled() bool {
+	if r != nil && r.Enabled != nil {
+		return *r.Enabled
+	}
+	return true
+}
+
+// ToPostgreSQLConfigurationKeyword returns the contained value as a valid PostgreSQL parameter to be injected
+// in the 'synchronous_standby_names' field
+func (s SynchronousReplicaConfigurationMethod) ToPostgreSQLConfigurationKeyword() string {
+	return strings.ToUpper(string(s))
+}
+
+func (c *CertificatesConfiguration) getServerAltDNSNames() []string {
+	if c == nil {
+		return nil
+	}
+
+	return c.ServerAltDNSNames
+}
+
+// HasElements returns true if it contains any Reference
+func (s *SQLRefs) HasElements() bool {
+	if s == nil {
+		return false
+	}
+
+	return len(s.ConfigMapRefs) != 0 ||
+		len(s.SecretRefs) != 0
+}
+
+// GetBackupID gets the backup ID
+func (target *RecoveryTarget) GetBackupID() string {
+	return target.BackupID
+}
+
+// GetTargetTime gets the target time
+func (target *RecoveryTarget) GetTargetTime() string {
+	return target.TargetTime
+}
+
+// GetTargetLSN gets the target LSN
+func (target *RecoveryTarget) GetTargetLSN() string {
+	return target.TargetLSN
+}
+
+// GetTargetTLI gets the target timeline
+func (target *RecoveryTarget) GetTargetTLI() string {
+	return target.TargetTLI
+}
+
+// GetSizeOrNil returns the requested storage size
+func (s *StorageConfiguration) GetSizeOrNil() *resource.Quantity {
+	if s == nil {
+		return nil
+	}
+
+	if s.Size != "" {
+		quantity, err := resource.ParseQuantity(s.Size)
+		if err != nil {
+			return nil
+		}
+
+		return &quantity
+	}
+
+	if s.PersistentVolumeClaimTemplate != nil {
+		return s.PersistentVolumeClaimTemplate.Resources.Requests.Storage()
+	}
+
+	return nil
+}
+
+// AreDefaultQueriesDisabled checks whether default monitoring queries should be disabled
+func (m *MonitoringConfiguration) AreDefaultQueriesDisabled() bool {
+	return m != nil && m.DisableDefaultQueries != nil && *m.DisableDefaultQueries
+}
+
+// GetServerName returns the server name, using the one specified in the
+// BarmanObjectStore when set, and defaulting to the name of the external cluster
+func (in ExternalCluster) GetServerName() string {
+	if in.BarmanObjectStore != nil && in.BarmanObjectStore.ServerName != "" {
+		return in.BarmanObjectStore.ServerName
+	}
+	return in.Name
+}
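+
+// For example (illustrative only): an external cluster named "origin" whose
+// barmanObjectStore stanza sets serverName to "pg-legacy" is addressed as
+// "pg-legacy" in the object store, while GetServerName returns "origin"
+// when no override is specified.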
+
+// IsEnabled returns true when this plugin is enabled
+func (config *PluginConfiguration) IsEnabled() bool {
+	if config.Enabled == nil {
+		return true
+	}
+	return *config.Enabled
+}
+
+// GetRoleSecretsName gets the name of the secret which is used to store the role's password
+func (roleConfiguration *RoleConfiguration) GetRoleSecretsName() string {
+	if roleConfiguration.PasswordSecret != nil {
+		return roleConfiguration.PasswordSecret.Name
+	}
+	return ""
+}
+
+// GetRoleInherit returns the inherit attribute of a roleConfiguration
+func (roleConfiguration *RoleConfiguration) GetRoleInherit() bool {
+	if roleConfiguration.Inherit != nil {
+		return *roleConfiguration.Inherit
+	}
+	return true
+}
+
+// SetManagedRoleSecretVersion adds, updates, or deletes the resource version of the managed role secret
+func (secretResourceVersion *SecretsResourceVersion) SetManagedRoleSecretVersion(secret string, version *string) {
+	if secretResourceVersion.ManagedRoleSecretVersions == nil {
+		secretResourceVersion.ManagedRoleSecretVersions = make(map[string]string)
+	}
+	if version == nil {
+		delete(secretResourceVersion.ManagedRoleSecretVersions, secret)
+	} else {
+		secretResourceVersion.ManagedRoleSecretVersions[secret] = *version
+	}
+}
+
+// SetExternalClusterSecretVersion adds, updates, or deletes the resource version of the secret used in external clusters
+func (secretResourceVersion *SecretsResourceVersion) SetExternalClusterSecretVersion(
+	secretName string,
+	version *string,
+) {
+	if secretResourceVersion.ExternalClusterSecretVersions == nil {
+		secretResourceVersion.ExternalClusterSecretVersions = make(map[string]string)
+	}
+
+	if version == nil {
+		delete(secretResourceVersion.ExternalClusterSecretVersions, secretName)
+		return
+	}
+
+	secretResourceVersion.ExternalClusterSecretVersions[secretName] = *version
+}
+
+// SetInContext records the cluster in the given context
+func (cluster *Cluster) SetInContext(ctx context.Context) context.Context {
+	return context.WithValue(ctx, contextutils.ContextKeyCluster, cluster)
+}
+
+// GetPostgresqlMajorVersion gets the PostgreSQL image major version, detecting it from the
+// ImageCatalogRef or from the image name.
+func (cluster *Cluster) GetPostgresqlMajorVersion() (int, error) {
+	if cluster.Spec.ImageCatalogRef != nil {
+		return cluster.Spec.ImageCatalogRef.Major, nil
+	}
+
+	if cluster.Spec.ImageName != "" {
+		imgVersion, err := version.FromTag(reference.New(cluster.Spec.ImageName).Tag)
+		if err != nil {
+			return 0, fmt.Errorf("cannot parse image name %q: %w", cluster.Spec.ImageName, err)
+		}
+		return int(imgVersion.Major()), nil //nolint:gosec
+	}
+
+	// Fallback for unit tests where a cluster is created without status or defaults
+	imgVersion, err := version.FromTag(reference.New(configuration.Current.PostgresImageName).Tag)
+	if err != nil {
+		return 0, fmt.Errorf("cannot parse default image name %q: %w", configuration.Current.PostgresImageName, err)
+	}
+	return int(imgVersion.Major()), nil //nolint:gosec
+}
+
+// GetImagePullSecret gets the name of the pull secret to use
+// to download the PostgreSQL image
+func (cluster *Cluster) GetImagePullSecret() string {
+	return cluster.Name + ClusterSecretSuffix
+}
+
+// GetSuperuserSecretName gets the secret name of the PostgreSQL superuser
+func (cluster *Cluster) GetSuperuserSecretName() string {
+	if cluster.Spec.SuperuserSecret != nil &&
+		cluster.Spec.SuperuserSecret.Name != "" {
+		return cluster.Spec.SuperuserSecret.Name
+	}
+
+	return fmt.Sprintf("%v%v", cluster.Name, SuperUserSecretSuffix)
+}
+
+// GetEnableLDAPAuth returns true if the bind or bind+search method is
+// configured in the cluster configuration
+func (cluster *Cluster) GetEnableLDAPAuth() bool {
+	if cluster.Spec.PostgresConfiguration.LDAP != nil &&
+		(cluster.Spec.PostgresConfiguration.LDAP.BindAsAuth != nil ||
+			cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth != nil) {
+		return true
+	}
+	return false
+}
+
+// GetLDAPSecretName gets the secret name containing the LDAP password
+func (cluster *Cluster) GetLDAPSecretName() string {
+	if cluster.Spec.PostgresConfiguration.LDAP != nil &&
+		cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth != nil &&
+		cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth.BindPassword != nil {
+		return cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth.BindPassword.Name
+	}
+	return ""
+}
+
+// ContainsManagedRolesConfiguration returns true iff there are managed roles configured
+func (cluster *Cluster) ContainsManagedRolesConfiguration() bool {
+	return cluster.Spec.Managed != nil && len(cluster.Spec.Managed.Roles) > 0
+}
+
+// GetExternalClusterSecrets returns the secrets used by external Clusters
+func (cluster *Cluster) GetExternalClusterSecrets() *stringset.Data {
+	secrets := stringset.New()
+
+	if cluster.Spec.ExternalClusters != nil {
+		for _, externalCluster := range cluster.Spec.ExternalClusters {
+			if externalCluster.Password != nil {
+				secrets.Put(externalCluster.Password.Name)
+			}
+			if externalCluster.SSLKey != nil {
+				secrets.Put(externalCluster.SSLKey.Name)
+			}
+			if externalCluster.SSLCert != nil {
+				secrets.Put(externalCluster.SSLCert.Name)
+			}
+			if externalCluster.SSLRootCert != nil {
+				secrets.Put(externalCluster.SSLRootCert.Name)
+			}
+		}
+	}
+	return secrets
+}
+
+// UsesSecretInManagedRoles checks if the given secret name is used in a managed role
+func (cluster *Cluster) UsesSecretInManagedRoles(secretName string) bool {
+	if !cluster.ContainsManagedRolesConfiguration() {
+		return false
+	}
+	for _, role := range cluster.Spec.Managed.Roles {
+		if role.PasswordSecret != nil && role.PasswordSecret.Name == secretName {
+			return true
+		}
+	}
+	return false
+}
+
+// GetApplicationSecretName gets the name of the application secret for any bootstrap type
+func (cluster *Cluster) GetApplicationSecretName() string {
+	bootstrap := cluster.Spec.Bootstrap
+	if bootstrap == nil {
+		return fmt.Sprintf("%v%v", cluster.Name, ApplicationUserSecretSuffix)
+	}
+	recovery := bootstrap.Recovery
+	if recovery != nil && recovery.Secret != nil && recovery.Secret.Name != "" {
+		return recovery.Secret.Name
+	}
+
+	pgBaseBackup := bootstrap.PgBaseBackup
+	if pgBaseBackup != nil && pgBaseBackup.Secret != nil && pgBaseBackup.Secret.Name != "" {
+		return pgBaseBackup.Secret.Name
+	}
+
+	initDB := bootstrap.InitDB
+	if initDB != nil && initDB.Secret != nil && initDB.Secret.Name != "" {
+		return initDB.Secret.Name
+	}
+
+	return fmt.Sprintf("%v%v", cluster.Name, ApplicationUserSecretSuffix)
+}
+
+// GetApplicationDatabaseName gets the name of the application database for a specific bootstrap
+func (cluster *Cluster) GetApplicationDatabaseName() string {
+	bootstrap := cluster.Spec.Bootstrap
+	if bootstrap == nil {
+		return ""
+	}
+
+	if bootstrap.Recovery != nil && bootstrap.Recovery.Database != "" {
+		return bootstrap.Recovery.Database
+	}
+
+	if bootstrap.PgBaseBackup != nil && bootstrap.PgBaseBackup.Database != "" {
+		return bootstrap.PgBaseBackup.Database
+	}
+
+	if bootstrap.InitDB != nil && bootstrap.InitDB.Database != "" {
+		return bootstrap.InitDB.Database
+	}
+
+	return ""
+}
+
+// GetApplicationDatabaseOwner gets the owner user of the application database for a specific bootstrap
+func (cluster *Cluster) GetApplicationDatabaseOwner() string {
+	bootstrap := cluster.Spec.Bootstrap
+	if bootstrap == nil {
+		return ""
+	}
+
+	if bootstrap.Recovery != nil && bootstrap.Recovery.Owner != "" {
+		return bootstrap.Recovery.Owner
+	}
+
+	if bootstrap.PgBaseBackup != nil && bootstrap.PgBaseBackup.Owner != "" {
+		return bootstrap.PgBaseBackup.Owner
+	}
+
+	if bootstrap.InitDB != nil && bootstrap.InitDB.Owner != "" {
+		return bootstrap.InitDB.Owner
+	}
+
+	return ""
+}
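+
+// A minimal sketch of the lookup order shared by the helpers above
+// (illustrative only): recovery wins over pg_basebackup, which wins over
+// initdb, e.g.
+//
+//	cluster := Cluster{Spec: ClusterSpec{
+//		Bootstrap: &BootstrapConfiguration{
+//			InitDB: &BootstrapInitDB{Database: "app", Owner: "app"},
+//		},
+//	}}
+//	_ = cluster.GetApplicationDatabaseName()  // "app"
+//	_ = cluster.GetApplicationDatabaseOwner() // "app"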
+
+// GetServerCASecretName gets the name of the secret containing the CA
+// of the cluster
+func (cluster *Cluster) GetServerCASecretName() string {
+	if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ServerCASecret != "" {
+		return cluster.Spec.Certificates.ServerCASecret
+	}
+	return fmt.Sprintf("%v%v", cluster.Name, DefaultServerCaSecretSuffix)
+}
+
+// GetServerTLSSecretName gets the name of the secret containing the
+// certificate that is used for the PostgreSQL servers
+func (cluster *Cluster) GetServerTLSSecretName() string {
+	if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ServerTLSSecret != "" {
+		return cluster.Spec.Certificates.ServerTLSSecret
+	}
+	return fmt.Sprintf("%v%v", cluster.Name, ServerSecretSuffix)
+}
+
+// GetClientCASecretName gets the name of the secret containing the CA
+// of the cluster
+func (cluster *Cluster) GetClientCASecretName() string {
+	if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ClientCASecret != "" {
+		return cluster.Spec.Certificates.ClientCASecret
+	}
+	return fmt.Sprintf("%v%v", cluster.Name, ClientCaSecretSuffix)
+}
+
+// GetFixedInheritedAnnotations gets the annotations that should be
+// inherited by all resources according to the cluster spec and the operator version
+func (cluster *Cluster) GetFixedInheritedAnnotations() map[string]string {
+	var meta metav1.ObjectMeta
+	utils.SetOperatorVersion(&meta, versions.Version)
+
+	if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Annotations == nil {
+		return meta.Annotations
+	}
+
+	utils.MergeMap(meta.Annotations, cluster.Spec.InheritedMetadata.Annotations)
+
+	return meta.Annotations
+}
+
+// GetFixedInheritedLabels gets the labels that should be
+// inherited by all resources according to the cluster spec
+func (cluster *Cluster) GetFixedInheritedLabels() map[string]string {
+	if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Labels == nil {
+		return nil
+	}
+	return cluster.Spec.InheritedMetadata.Labels
+}
+
+// GetReplicationSecretName gets the name of the secret for the replication user
+func (cluster *Cluster) GetReplicationSecretName() string {
+	if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ReplicationTLSSecret != "" {
+		return cluster.Spec.Certificates.ReplicationTLSSecret
+	}
+	return fmt.Sprintf("%v%v", cluster.Name, ReplicationSecretSuffix)
+}
+
+// GetServiceAnyName returns the name of the service that is used as DNS
+// domain for all the nodes, even if they are not ready
+func (cluster *Cluster) GetServiceAnyName() string {
+	return fmt.Sprintf("%v%v", cluster.Name, ServiceAnySuffix)
+}
+
+// GetServiceReadName returns the default name of the service that is used for
+// read transactions (including the primary)
+func (cluster *Cluster) GetServiceReadName() string {
+	return fmt.Sprintf("%v%v", cluster.Name, ServiceReadSuffix)
+}
+
+// GetServiceReadOnlyName returns the default name of the service that is used for
+// read-only transactions (excluding the primary)
+func (cluster *Cluster) GetServiceReadOnlyName() string {
+	return fmt.Sprintf("%v%v", cluster.Name, ServiceReadOnlySuffix)
+}
+
+// GetServiceReadWriteName returns the default name of the service that is used for
+// read-write transactions
+func (cluster *Cluster) GetServiceReadWriteName() string {
+	return fmt.Sprintf("%v%v", cluster.Name, ServiceReadWriteSuffix)
+}
+
+// GetMaxStartDelay gets the value of the startDelay config option
+func (cluster *Cluster) GetMaxStartDelay() int32 {
+	if cluster.Spec.MaxStartDelay > 0 {
+		return cluster.Spec.MaxStartDelay
+	}
+	return DefaultStartupDelay
+}
+
+// GetMaxStopDelay gets the amount of time PostgreSQL has to stop
+func (cluster *Cluster) GetMaxStopDelay() int32 {
+	if cluster.Spec.MaxStopDelay > 0 {
+		return cluster.Spec.MaxStopDelay
+	}
+	return 1800
+}
+
+// GetSmartShutdownTimeout returns the configured smart shutdown timeout,
+// defaulting to 180 seconds when unset
+func (cluster *Cluster) GetSmartShutdownTimeout() int32 {
+	if cluster.Spec.SmartShutdownTimeout != nil {
+		return *cluster.Spec.SmartShutdownTimeout
+	}
+	return 180
+}
+
+// GetRestartTimeout returns the timeout for operations that involve
+// a restart of a PostgreSQL instance
+func (cluster *Cluster) GetRestartTimeout() time.Duration {
+	return time.Duration(cluster.GetMaxStopDelay()+cluster.GetMaxStartDelay()) * time.Second
+}
+
+// GetMaxSwitchoverDelay gets the amount of time PostgreSQL has to stop before switchover
+func (cluster *Cluster) GetMaxSwitchoverDelay() int32 {
+	if cluster.Spec.MaxSwitchoverDelay > 0 {
+		return cluster.Spec.MaxSwitchoverDelay
+	}
+	return DefaultMaxSwitchoverDelay
+}
+
+// GetPrimaryUpdateStrategy gets the cluster primary update strategy,
+// defaulting to unsupervised
+func (cluster *Cluster) GetPrimaryUpdateStrategy() PrimaryUpdateStrategy {
+	strategy := cluster.Spec.PrimaryUpdateStrategy
+	if strategy == "" {
+		return PrimaryUpdateStrategyUnsupervised
+	}
+
+	return strategy
+}
+
+// GetPrimaryUpdateMethod gets the cluster primary update method,
+// defaulting to restart
+func (cluster *Cluster) GetPrimaryUpdateMethod() PrimaryUpdateMethod {
+	strategy := cluster.Spec.PrimaryUpdateMethod
+	if strategy == "" {
+		return PrimaryUpdateMethodRestart
+	}
+
+	return strategy
+}
+
+// GetEnablePDB gets the cluster EnablePDB value, defaulting to true
+func (cluster *Cluster) GetEnablePDB() bool {
+	if cluster.Spec.EnablePDB == nil {
+		return true
+	}
+
+	return *cluster.Spec.EnablePDB
+}
+
+// IsNodeMaintenanceWindowInProgress checks whether a node maintenance window is in progress
+func (cluster *Cluster) IsNodeMaintenanceWindowInProgress() bool {
+	return cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.InProgress
+}
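+
+// Worked example (illustrative): with spec.maxStopDelay set to 600 and
+// spec.maxStartDelay set to 120, GetRestartTimeout returns
+// (600 + 120) * time.Second, i.e. twelve minutes; with both unset it
+// returns (1800 + DefaultStartupDelay) seconds.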
+
+// GetPgCtlTimeoutForPromotion returns the timeout that should be waited for an
+// instance to be promoted to primary. By default, DefaultPgCtlTimeoutForPromotion
+// is large enough to simulate an infinite timeout
+func (cluster *Cluster) GetPgCtlTimeoutForPromotion() int32 {
+	timeout := cluster.Spec.PostgresConfiguration.PgCtlTimeoutForPromotion
+	if timeout == 0 {
+		return DefaultPgCtlTimeoutForPromotion
+	}
+	return timeout
+}
+
+// IsReusePVCEnabled checks whether PVCs should be reused during a maintenance window
+func (cluster *Cluster) IsReusePVCEnabled() bool {
+	reusePVC := true
+	if cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.ReusePVC != nil {
+		reusePVC = *cluster.Spec.NodeMaintenanceWindow.ReusePVC
+	}
+	return reusePVC
+}
+
+// IsInstanceFenced checks whether a given instance should be fenced
+func (cluster *Cluster) IsInstanceFenced(instance string) bool {
+	fencedInstances, err := utils.GetFencedInstances(cluster.Annotations)
+	if err != nil {
+		return false
+	}
+
+	if fencedInstances.Has(utils.FenceAllInstances) {
+		return true
+	}
+	return fencedInstances.Has(instance)
+}
+
+// ShouldResizeInUseVolumes is true when we should resize the PVCs we already
+// created
+func (cluster *Cluster) ShouldResizeInUseVolumes() bool {
+	if cluster.Spec.StorageConfiguration.ResizeInUseVolumes == nil {
+		return true
+	}
+
+	return *cluster.Spec.StorageConfiguration.ResizeInUseVolumes
+}
+
+// ShouldCreateApplicationSecret returns true if for this cluster,
+// during the bootstrap phase, we need to create a secret to store application credentials
+func (cluster *Cluster) ShouldCreateApplicationSecret() bool {
+	return cluster.ShouldInitDBCreateApplicationSecret() ||
+		cluster.ShouldPgBaseBackupCreateApplicationSecret() ||
+		cluster.ShouldRecoveryCreateApplicationSecret()
+}
+
+// ShouldInitDBCreateApplicationSecret returns true if for this cluster,
+// during the bootstrap phase using initDB, we need to create a new application secret
+func (cluster *Cluster) ShouldInitDBCreateApplicationSecret() bool {
+	return cluster.ShouldInitDBCreateApplicationDatabase() &&
+		(cluster.Spec.Bootstrap.InitDB.Secret == nil ||
+			cluster.Spec.Bootstrap.InitDB.Secret.Name == "")
+}
+
+// ShouldPgBaseBackupCreateApplicationSecret returns true if for this cluster,
+// during the bootstrap phase using pg_basebackup, we need to create an application secret
+func (cluster *Cluster) ShouldPgBaseBackupCreateApplicationSecret() bool {
+	return cluster.ShouldPgBaseBackupCreateApplicationDatabase() &&
+		(cluster.Spec.Bootstrap.PgBaseBackup.Secret == nil ||
+			cluster.Spec.Bootstrap.PgBaseBackup.Secret.Name == "")
+}
+
+// ShouldRecoveryCreateApplicationSecret returns true if for this cluster,
+// during the bootstrap phase using recovery, we need to create an application secret
+func (cluster *Cluster) ShouldRecoveryCreateApplicationSecret() bool {
+	return cluster.ShouldRecoveryCreateApplicationDatabase() &&
+		(cluster.Spec.Bootstrap.Recovery.Secret == nil ||
+			cluster.Spec.Bootstrap.Recovery.Secret.Name == "")
+}
+
+// ShouldCreateApplicationDatabase returns true if for this cluster,
+// during the bootstrap phase, we need to create an application database
+func (cluster *Cluster) ShouldCreateApplicationDatabase() bool {
+	return cluster.ShouldInitDBCreateApplicationDatabase() ||
+		cluster.ShouldRecoveryCreateApplicationDatabase() ||
+		cluster.ShouldPgBaseBackupCreateApplicationDatabase()
+}
+
+// ShouldInitDBRunPostInitApplicationSQLRefs returns true if for this cluster,
+// during the bootstrap phase using initDB, we need to run post init SQL files
+// for the application database from provided references.
+func (cluster *Cluster) ShouldInitDBRunPostInitApplicationSQLRefs() bool {
+	if cluster.Spec.Bootstrap == nil {
+		return false
+	}
+
+	if cluster.Spec.Bootstrap.InitDB == nil {
+		return false
+	}
+
+	return cluster.Spec.Bootstrap.InitDB.PostInitApplicationSQLRefs.HasElements()
+}
+
+// ShouldInitDBRunPostInitTemplateSQLRefs returns true if for this cluster,
+// during the bootstrap phase using initDB, we need to run post init SQL files
+// for the `template1` database from provided references.
+func (cluster *Cluster) ShouldInitDBRunPostInitTemplateSQLRefs() bool {
+	if cluster.Spec.Bootstrap == nil {
+		return false
+	}
+
+	if cluster.Spec.Bootstrap.InitDB == nil {
+		return false
+	}
+
+	return cluster.Spec.Bootstrap.InitDB.PostInitTemplateSQLRefs.HasElements()
+}
+
+// ShouldInitDBRunPostInitSQLRefs returns true if for this cluster,
+// during the bootstrap phase using initDB, we need to run post init SQL files
+// for the `postgres` database from provided references.
+func (cluster *Cluster) ShouldInitDBRunPostInitSQLRefs() bool {
+	if cluster.Spec.Bootstrap == nil {
+		return false
+	}
+
+	if cluster.Spec.Bootstrap.InitDB == nil {
+		return false
+	}
+
+	return cluster.Spec.Bootstrap.InitDB.PostInitSQLRefs.HasElements()
+}
+
+// ShouldInitDBCreateApplicationDatabase returns true if the application database needs to be created during the
+// initdb job
+func (cluster *Cluster) ShouldInitDBCreateApplicationDatabase() bool {
+	if cluster.Spec.Bootstrap == nil {
+		return false
+	}
+
+	if cluster.Spec.Bootstrap.InitDB == nil {
+		return false
+	}
+
+	initDBParameters := cluster.Spec.Bootstrap.InitDB
+	return initDBParameters.Owner != "" && initDBParameters.Database != ""
+}
+
+// ShouldPgBaseBackupCreateApplicationDatabase returns true if the application database needs to be created during the
+// pg_basebackup job
+func (cluster *Cluster) ShouldPgBaseBackupCreateApplicationDatabase() bool {
+	// We skip creating the application database if the cluster is a replica
+	if cluster.IsReplica() {
+		return false
+	}
+	if cluster.Spec.Bootstrap == nil {
+		return false
+	}
+
+	if cluster.Spec.Bootstrap.PgBaseBackup == nil {
+		return false
+	}
+
+	pgBaseBackupParameters := cluster.Spec.Bootstrap.PgBaseBackup
+	return pgBaseBackupParameters.Owner != "" && pgBaseBackupParameters.Database != ""
+}
+
+// ShouldRecoveryCreateApplicationDatabase returns true if the application database needs to be created during the
+// recovery job
+func (cluster *Cluster) ShouldRecoveryCreateApplicationDatabase() bool {
+	// We skip creating the application database if the cluster is a replica
+	if cluster.IsReplica() {
+		return false
+	}
+
+	if cluster.Spec.Bootstrap == nil {
+		return false
+	}
+
+	if cluster.Spec.Bootstrap.Recovery == nil {
+		return false
+	}
+
+	recoveryParameters := cluster.Spec.Bootstrap.Recovery
+	return recoveryParameters.Owner != "" && recoveryParameters.Database != ""
+}
+
+// ShouldCreateProjectedVolume returns whether we should create the projected all-in-one volume
+func (cluster *Cluster) ShouldCreateProjectedVolume() bool {
+	return cluster.Spec.ProjectedVolumeTemplate != nil
+}
+
+// ShouldCreateWalArchiveVolume returns whether we should create the WAL archive volume
+func (cluster *Cluster) ShouldCreateWalArchiveVolume() bool {
+	return cluster.Spec.WalStorage != nil
+}
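+
+// Illustrative sketch (assumptions: the ReplicaClusterConfiguration type
+// name and the ptr import): a replica cluster never creates the
+// application database, regardless of the bootstrap stanza, e.g.
+//
+//	cluster := Cluster{Spec: ClusterSpec{
+//		ReplicaCluster: &ReplicaClusterConfiguration{Enabled: ptr.To(true)},
+//		Bootstrap: &BootstrapConfiguration{
+//			PgBaseBackup: &BootstrapPgBaseBackup{Database: "app", Owner: "app"},
+//		},
+//	}}
+//	_ = cluster.ShouldPgBaseBackupCreateApplicationDatabase() // false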
we don't need to promote. + if cluster.Spec.ReplicaCluster == nil { + return false + } + + // If we don't have a promotion token, we don't need to promote + if len(cluster.Spec.ReplicaCluster.PromotionToken) == 0 { + return false + } + + // If the current token was already used, there's no need to + // promote + if cluster.Spec.ReplicaCluster.PromotionToken == cluster.Status.LastPromotionToken { + return false + } + return true +} + +// ContainsTablespaces returns true if for this cluster, we need to create tablespaces +func (cluster *Cluster) ContainsTablespaces() bool { + return len(cluster.Spec.Tablespaces) != 0 +} + +// GetPostgresUID returns the UID that is being used for the "postgres" +// user +func (cluster Cluster) GetPostgresUID() int64 { + if cluster.Spec.PostgresUID == 0 { + return DefaultPostgresUID + } + return cluster.Spec.PostgresUID +} + +// GetPostgresGID returns the GID that is being used for the "postgres" +// user +func (cluster Cluster) GetPostgresGID() int64 { + if cluster.Spec.PostgresGID == 0 { + return DefaultPostgresGID + } + return cluster.Spec.PostgresGID +} + +// ExternalCluster gets the external server with a known name, returning +// true if the server was found and false otherwise +func (cluster Cluster) ExternalCluster(name string) (ExternalCluster, bool) { + for _, server := range cluster.Spec.ExternalClusters { + if server.Name == name { + return server, true + } + } + + return ExternalCluster{}, false +} + +// IsReplica checks if this is a replica cluster or not +func (cluster Cluster) IsReplica() bool { + // Before introducing the "primary" field, the + // "enabled" parameter was declared as a "boolean" + // and was not declared "omitempty". + // + // Legacy replica clusters will have the "replica" stanza + // and the "enabled" field set explicitly to true. + // + // The following code is designed to not change the + // previous semantics. + r := cluster.Spec.ReplicaCluster + if r == nil { + return false + } + + if r.Enabled != nil { + return *r.Enabled + } + + clusterName := r.Self + if len(clusterName) == 0 { + clusterName = cluster.Name + } + + return clusterName != r.Primary +} + +var slotNameNegativeRegex = regexp.MustCompile("[^a-z0-9_]+") + +// GetSlotNameFromInstanceName returns the slot name, given the instance name. 
+// It returns an empty string if High Availability Replication Slots are disabled
+func (cluster Cluster) GetSlotNameFromInstanceName(instanceName string) string {
+	if cluster.Spec.ReplicationSlots == nil ||
+		cluster.Spec.ReplicationSlots.HighAvailability == nil ||
+		!cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() {
+		return ""
+	}
+
+	return cluster.Spec.ReplicationSlots.HighAvailability.GetSlotNameFromInstanceName(instanceName)
+}
+
+// GetBarmanEndpointCAForReplicaCluster returns the barman endpoint CA of the
+// replication source when this is a replica cluster that needs one, or nil otherwise
+func (cluster Cluster) GetBarmanEndpointCAForReplicaCluster() *SecretKeySelector {
+	if !cluster.IsReplica() {
+		return nil
+	}
+	sourceName := cluster.Spec.ReplicaCluster.Source
+	externalCluster, found := cluster.ExternalCluster(sourceName)
+	if !found || externalCluster.BarmanObjectStore == nil {
+		return nil
+	}
+	return externalCluster.BarmanObjectStore.EndpointCA
+}
+
+// GetClusterAltDNSNames returns all the names needed to build a valid Server Certificate
+func (cluster *Cluster) GetClusterAltDNSNames() []string {
+	buildServiceNames := func(serviceName string, enabled bool) []string {
+		if !enabled {
+			return nil
+		}
+		return []string{
+			serviceName,
+			fmt.Sprintf("%v.%v", serviceName, cluster.Namespace),
+			fmt.Sprintf("%v.%v.svc", serviceName, cluster.Namespace),
+			fmt.Sprintf("%v.%v.svc.%s", serviceName, cluster.Namespace, configuration.Current.KubernetesClusterDomain),
+		}
+	}
+	altDNSNames := slices.Concat(
+		buildServiceNames(cluster.GetServiceReadWriteName(), cluster.IsReadWriteServiceEnabled()),
+		buildServiceNames(cluster.GetServiceReadName(), cluster.IsReadServiceEnabled()),
+		buildServiceNames(cluster.GetServiceReadOnlyName(), cluster.IsReadOnlyServiceEnabled()),
+	)
+
+	if cluster.Spec.Managed != nil && cluster.Spec.Managed.Services != nil {
+		for _, service := range cluster.Spec.Managed.Services.Additional {
+			altDNSNames = append(altDNSNames, buildServiceNames(service.ServiceTemplate.ObjectMeta.Name, true)...)
+		}
+	}
+
+	return append(altDNSNames, cluster.Spec.Certificates.getServerAltDNSNames()...)
+}
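For illustration, a minimal self-contained sketch (not part of the patch; service name, namespace, and domain are hypothetical) of the expansion buildServiceNames performs for a single enabled service:

package main

import "fmt"

// Sketch of the four DNS forms generated per service, assuming a
// read-write service "pg-rw" in namespace "apps" with the default
// "cluster.local" Kubernetes domain (all values hypothetical).
func main() {
	serviceName, namespace, domain := "pg-rw", "apps", "cluster.local"
	names := []string{
		serviceName,
		fmt.Sprintf("%v.%v", serviceName, namespace),
		fmt.Sprintf("%v.%v.svc", serviceName, namespace),
		fmt.Sprintf("%v.%v.svc.%s", serviceName, namespace, domain),
	}
	fmt.Println(names)
	// Prints: [pg-rw pg-rw.apps pg-rw.apps.svc pg-rw.apps.svc.cluster.local]
}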
+
+// UsesSecret checks whether a given secret is used by a Cluster.
+//
+// This function is also used to discover the set of clusters that
+// should be reconciled when a certain secret changes.
+func (cluster *Cluster) UsesSecret(secret string) bool {
+	if _, ok := cluster.Status.SecretsResourceVersion.Metrics[secret]; ok {
+		return true
+	}
+	certificates := cluster.Status.Certificates
+	switch secret {
+	case cluster.GetSuperuserSecretName(),
+		cluster.GetApplicationSecretName(),
+		certificates.ClientCASecret,
+		certificates.ReplicationTLSSecret,
+		certificates.ServerCASecret,
+		certificates.ServerTLSSecret:
+		return true
+	}
+
+	if cluster.UsesSecretInManagedRoles(secret) {
+		return true
+	}
+
+	if cluster.Spec.Backup.IsBarmanEndpointCASet() && cluster.Spec.Backup.BarmanObjectStore.EndpointCA.Name == secret {
+		return true
+	}
+
+	if endpointCA := cluster.GetBarmanEndpointCAForReplicaCluster(); endpointCA != nil && endpointCA.Name == secret {
+		return true
+	}
+
+	if cluster.Status.PoolerIntegrations != nil {
+		for _, pgBouncerSecretName := range cluster.Status.PoolerIntegrations.PgBouncerIntegration.Secrets {
+			if pgBouncerSecretName == secret {
+				return true
+			}
+		}
+	}
+
+	// watch the secrets defined in external clusters
+	return cluster.GetExternalClusterSecrets().Has(secret)
+}
+
+// UsesConfigMap checks whether a given config map is used by a Cluster
+func (cluster *Cluster) UsesConfigMap(config string) (ok bool) {
+	if _, ok := cluster.Status.ConfigMapResourceVersion.Metrics[config]; ok {
+		return true
+	}
+	return false
+}
+
+// IsPodMonitorEnabled checks if the PodMonitor object needs to be created
+func (cluster *Cluster) IsPodMonitorEnabled() bool {
+	if cluster.Spec.Monitoring != nil {
+		return cluster.Spec.Monitoring.EnablePodMonitor
+	}
+
+	return false
+}
+
+// IsMetricsTLSEnabled checks if the metrics endpoint should use TLS
+func (cluster *Cluster) IsMetricsTLSEnabled() bool {
+	if cluster.Spec.Monitoring != nil && cluster.Spec.Monitoring.TLSConfig != nil {
+		return cluster.Spec.Monitoring.TLSConfig.Enabled
+	}
+
+	return false
+}
+
+// GetEnableSuperuserAccess returns whether superuser access is enabled
+func (cluster *Cluster) GetEnableSuperuserAccess() bool {
+	if cluster.Spec.EnableSuperuserAccess != nil {
+		return *cluster.Spec.EnableSuperuserAccess
+	}
+
+	return false
+}
+
+// LogTimestampsWithMessage logs useful information about the cluster's
+// timestamps together with the given message
+func (cluster *Cluster) LogTimestampsWithMessage(ctx context.Context, logMessage string) {
+	contextLogger := log.FromContext(ctx)
+
+	currentTimestamp := pgTime.GetCurrentTimestamp()
+	keysAndValues := []interface{}{
+		"phase", cluster.Status.Phase,
+		"currentTimestamp", currentTimestamp,
+		"targetPrimaryTimestamp", cluster.Status.TargetPrimaryTimestamp,
+		"currentPrimaryTimestamp", cluster.Status.CurrentPrimaryTimestamp,
+	}
+
+	var errs []string
+
+	// Elapsed time since the last request of promotion (TargetPrimaryTimestamp)
+	if diff, err := pgTime.DifferenceBetweenTimestamps(
+		currentTimestamp,
+		cluster.Status.TargetPrimaryTimestamp,
+	); err == nil {
+		keysAndValues = append(
+			keysAndValues,
+			"msPassedSinceTargetPrimaryTimestamp",
+			diff.Milliseconds(),
+		)
+	} else {
+		errs = append(errs, err.Error())
+	}
+
+	// Elapsed time since the last promotion (CurrentPrimaryTimestamp)
+	if currentPrimaryDifference, err := pgTime.DifferenceBetweenTimestamps(
+		currentTimestamp,
+		cluster.Status.CurrentPrimaryTimestamp,
+	); err == nil {
+		keysAndValues = append(
+			keysAndValues,
+			"msPassedSinceCurrentPrimaryTimestamp",
+			currentPrimaryDifference.Milliseconds(),
+		)
+	} else {
+		errs = append(errs, err.Error())
+	}
+
+	// Difference between the last promotion and the last request of promotion
+	// When positive, it is the amount of time required in the last promotion
+	// of a standby to a primary. If negative, it means we have a failover/switchover
+	// in progress, and the value represents the last measured uptime of the primary.
+	if currentPrimaryTargetDifference, err := pgTime.DifferenceBetweenTimestamps(
+		cluster.Status.CurrentPrimaryTimestamp,
+		cluster.Status.TargetPrimaryTimestamp,
+	); err == nil {
+		keysAndValues = append(
+			keysAndValues,
+			"msDifferenceBetweenCurrentAndTargetPrimary",
+			currentPrimaryTargetDifference.Milliseconds(),
+		)
+	} else {
+		errs = append(errs, err.Error())
+	}
+
+	if len(errs) > 0 {
+		keysAndValues = append(keysAndValues, "timestampParsingErrors", errs)
+	}
+
+	contextLogger.Info(logMessage, keysAndValues...)
+}
+
+// SetInheritedDataAndOwnership sets the cluster as owner of the passed object and then
+// sets all the needed annotations and labels
+func (cluster *Cluster) SetInheritedDataAndOwnership(obj *metav1.ObjectMeta) {
+	cluster.SetInheritedData(obj)
+	utils.SetAsOwnedBy(obj, cluster.ObjectMeta, cluster.TypeMeta)
+}
+
+// SetInheritedData sets all the needed annotations and labels
+func (cluster *Cluster) SetInheritedData(obj *metav1.ObjectMeta) {
+	utils.InheritAnnotations(obj, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current)
+	utils.InheritLabels(obj, cluster.Labels, cluster.GetFixedInheritedLabels(), configuration.Current)
+	utils.LabelClusterName(obj, cluster.GetName())
+}
+
+// ShouldForceLegacyBackup returns true when the cluster is annotated to take
+// a backup without passing the name argument even on barman version 3.3.0+.
+// This is needed to test both backup systems in the E2E suite
+func (cluster *Cluster) ShouldForceLegacyBackup() bool {
+	return cluster.Annotations[utils.LegacyBackupAnnotationName] == "true"
+}
+
+// GetSeccompProfile returns the proper SeccompProfile set in the cluster for Pods and Containers
+func (cluster *Cluster) GetSeccompProfile() *corev1.SeccompProfile {
+	if cluster.Spec.SeccompProfile != nil {
+		return cluster.Spec.SeccompProfile
+	}
+
+	return &corev1.SeccompProfile{
+		Type: corev1.SeccompProfileTypeRuntimeDefault,
+	}
+}
+
+// GetCoredumpFilter gets the coredump filter value from the cluster annotation
+func (cluster *Cluster) GetCoredumpFilter() string {
+	value, ok := cluster.Annotations[utils.CoredumpFilter]
+	if ok {
+		return value
+	}
+	return system.DefaultCoredumpFilter
+}
+
+// IsInplaceRestartPhase returns true if the cluster is in a phase that handles the Inplace restart
+func (cluster *Cluster) IsInplaceRestartPhase() bool {
+	return cluster.Status.Phase == PhaseInplacePrimaryRestart ||
+		cluster.Status.Phase == PhaseInplaceDeletePrimaryRestart
+}
+
+// GetTablespaceConfiguration returns the TablespaceConfiguration with the
+// given name, or nil if it does not exist
+func (cluster *Cluster) GetTablespaceConfiguration(name string) *TablespaceConfiguration {
+	for _, tbsConfig := range cluster.Spec.Tablespaces {
+		if name == tbsConfig.Name {
+			return &tbsConfig
+		}
+	}
+
+	return nil
+}
+
+// GetServerCASecretObjectKey returns a types.NamespacedName pointing to the secret
+func (cluster *Cluster) GetServerCASecretObjectKey() types.NamespacedName {
+	return types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.GetServerCASecretName()}
+}
+
+// IsBarmanBackupConfigured returns true if one of the possible backup destinations
+// is configured, false otherwise
+func (backupConfiguration *BackupConfiguration) IsBarmanBackupConfigured() bool {
+	return backupConfiguration != nil &&
backupConfiguration.BarmanObjectStore != nil && + backupConfiguration.BarmanObjectStore.ArePopulated() +} + +// IsBarmanEndpointCASet returns true if we have a CA bundle for the endpoint +// false otherwise +func (backupConfiguration *BackupConfiguration) IsBarmanEndpointCASet() bool { + return backupConfiguration != nil && + backupConfiguration.BarmanObjectStore != nil && + backupConfiguration.BarmanObjectStore.EndpointCA != nil && + backupConfiguration.BarmanObjectStore.EndpointCA.Name != "" && + backupConfiguration.BarmanObjectStore.EndpointCA.Key != "" +} + +// UpdateBackupTimes sets the firstRecoverabilityPoint and lastSuccessfulBackup +// for the provided method, as well as the overall firstRecoverabilityPoint and +// lastSuccessfulBackup for the cluster +func (cluster *Cluster) UpdateBackupTimes( + backupMethod BackupMethod, + firstRecoverabilityPoint *time.Time, + lastSuccessfulBackup *time.Time, +) { + type comparer func(a metav1.Time, b metav1.Time) bool + // tryGetMaxTime gets either the newest or oldest time from a set of backup times, + // depending on the comparer argument passed to it + tryGetMaxTime := func(m map[BackupMethod]metav1.Time, compare comparer) string { + var maximum metav1.Time + for _, ts := range m { + if maximum.IsZero() || compare(ts, maximum) { + maximum = ts + } + } + result := "" + if !maximum.IsZero() { + result = maximum.Format(time.RFC3339) + } + + return result + } + + setTime := func(backupTimes map[BackupMethod]metav1.Time, value *time.Time) map[BackupMethod]metav1.Time { + if value == nil { + delete(backupTimes, backupMethod) + return backupTimes + } + + if backupTimes == nil { + backupTimes = make(map[BackupMethod]metav1.Time) + } + + backupTimes[backupMethod] = metav1.NewTime(*value) + return backupTimes + } + + cluster.Status.FirstRecoverabilityPointByMethod = setTime(cluster.Status.FirstRecoverabilityPointByMethod, + firstRecoverabilityPoint) + cluster.Status.FirstRecoverabilityPoint = tryGetMaxTime( + cluster.Status.FirstRecoverabilityPointByMethod, + // we pass a comparer to get the first among the recoverability points + func(a metav1.Time, b metav1.Time) bool { + return a.Before(&b) + }) + + cluster.Status.LastSuccessfulBackupByMethod = setTime(cluster.Status.LastSuccessfulBackupByMethod, + lastSuccessfulBackup) + cluster.Status.LastSuccessfulBackup = tryGetMaxTime( + cluster.Status.LastSuccessfulBackupByMethod, + // we pass a comparer to get the last among the last backup times per method + func(a metav1.Time, b metav1.Time) bool { + return b.Before(&a) + }) +} + +// IsReadServiceEnabled checks if the read service is enabled for the cluster. +// It returns false if the read service is listed in the DisabledDefaultServices slice. +func (cluster *Cluster) IsReadServiceEnabled() bool { + if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { + return true + } + + return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeR) +} + +// IsReadWriteServiceEnabled checks if the read-write service is enabled for the cluster. +// It returns false if the read-write service is listed in the DisabledDefaultServices slice. +func (cluster *Cluster) IsReadWriteServiceEnabled() bool { + if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { + return true + } + return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRW) +} + +// IsReadOnlyServiceEnabled checks if the read-only service is enabled for the cluster. 
+// It returns false if the read-only service is listed in the DisabledDefaultServices slice.
+func (cluster *Cluster) IsReadOnlyServiceEnabled() bool {
+	if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil {
+		return true
+	}
+
+	return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRO)
+}
+
+// GetRecoverySourcePlugin returns the configuration of the plugin being
+// the recovery source of the cluster. If no such plugin has been configured,
+// nil is returned
+func (cluster *Cluster) GetRecoverySourcePlugin() *PluginConfiguration {
+	if cluster.Spec.Bootstrap == nil || cluster.Spec.Bootstrap.Recovery == nil {
+		return nil
+	}
+
+	recoveryConfig := cluster.Spec.Bootstrap.Recovery
+	if len(recoveryConfig.Source) == 0 {
+		// Plugin-based recovery is supported only with
+		// an external cluster definition
+		return nil
+	}
+
+	recoveryExternalCluster, found := cluster.ExternalCluster(recoveryConfig.Source)
+	if !found {
+		// This error should have already been detected
+		// by the validating webhook.
+		return nil
+	}
+
+	return recoveryExternalCluster.PluginConfiguration
+}
+
+// EnsureGVKIsPresent ensures that the GroupVersionKind (GVK) metadata is present in the Cluster object.
+// This is necessary because informers do not automatically include metadata inside the object.
+// By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object.
+func (cluster *Cluster) EnsureGVKIsPresent() {
+	cluster.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   SchemeGroupVersion.Group,
+		Version: SchemeGroupVersion.Version,
+		Kind:    ClusterKind,
+	})
+}
+
+// BuildPostgresOptions creates the list of options that
+// should be added to the PostgreSQL configuration to
+// recover to a given target
+func (target *RecoveryTarget) BuildPostgresOptions() string {
+	result := ""
+
+	if target == nil {
+		return result
+	}
+
+	if target.TargetTLI != "" {
+		result += fmt.Sprintf(
+			"recovery_target_timeline = '%v'\n",
+			target.TargetTLI)
+	}
+	if target.TargetXID != "" {
+		result += fmt.Sprintf(
+			"recovery_target_xid = '%v'\n",
+			target.TargetXID)
+	}
+	if target.TargetName != "" {
+		result += fmt.Sprintf(
+			"recovery_target_name = '%v'\n",
+			target.TargetName)
+	}
+	if target.TargetLSN != "" {
+		result += fmt.Sprintf(
+			"recovery_target_lsn = '%v'\n",
+			target.TargetLSN)
+	}
+	if target.TargetTime != "" {
+		result += fmt.Sprintf(
+			"recovery_target_time = '%v'\n",
+			pgTime.ConvertToPostgresFormat(target.TargetTime))
+	}
+	if target.TargetImmediate != nil && *target.TargetImmediate {
+		result += "recovery_target = immediate\n"
+	}
+	if target.Exclusive != nil && *target.Exclusive {
+		result += "recovery_target_inclusive = false\n"
+	} else {
+		result += "recovery_target_inclusive = true\n"
+	}
+
+	return result
+}
+
+// ApplyInto applies the content of the probe configuration into a Kubernetes
+// probe
+func (p *Probe) ApplyInto(k8sProbe *corev1.Probe) {
+	if p == nil {
+		return
+	}
+
+	if p.InitialDelaySeconds != 0 {
+		k8sProbe.InitialDelaySeconds = p.InitialDelaySeconds
+	}
+	if p.TimeoutSeconds != 0 {
+		k8sProbe.TimeoutSeconds = p.TimeoutSeconds
+	}
+	if p.PeriodSeconds != 0 {
+		k8sProbe.PeriodSeconds = p.PeriodSeconds
+	}
+	if p.SuccessThreshold != 0 {
+		k8sProbe.SuccessThreshold = p.SuccessThreshold
+	}
+	if p.FailureThreshold != 0 {
+		k8sProbe.FailureThreshold = p.FailureThreshold
+	}
+	if p.TerminationGracePeriodSeconds != nil {
+		k8sProbe.TerminationGracePeriodSeconds = p.TerminationGracePeriodSeconds
+	}
+}
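A short usage sketch of the merge semantics implemented above (illustrative only; assumes corev1 = k8s.io/api/core/v1 and apiv1 = this package): zero-valued fields leave the target probe untouched, so CRD-level settings act as selective overrides.

// Hypothetical sketch, not part of the patch: only the non-zero fields
// of the override are copied onto the Kubernetes probe.
func exampleProbeOverride() *corev1.Probe {
	k8sProbe := &corev1.Probe{PeriodSeconds: 10, FailureThreshold: 3}
	override := &apiv1.Probe{TimeoutSeconds: 5} // everything else left at zero
	override.ApplyInto(k8sProbe)
	// k8sProbe now carries PeriodSeconds: 10, FailureThreshold: 3, TimeoutSeconds: 5
	return k8sProbe
}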
+
+// ApplyInto applies the content of the probe configuration into a Kubernetes
+// probe
+func (p *ProbeWithStrategy) ApplyInto(k8sProbe *corev1.Probe) {
+	if p == nil {
+		return
+	}
+
+	p.Probe.ApplyInto(k8sProbe)
+}
+
+// GetEnabledWALArchivePluginName returns the name of the enabled WAL archiver
+// plugin, or an empty string if no such plugin is enabled
+func (cluster *Cluster) GetEnabledWALArchivePluginName() string {
+	for _, plugin := range cluster.Spec.Plugins {
+		if plugin.IsEnabled() && plugin.IsWALArchiver != nil && *plugin.IsWALArchiver {
+			return plugin.Name
+		}
+	}
+
+	return ""
+}
+
+// IsFailoverQuorumActive checks whether we should enable the
+// quorum failover protection alpha feature.
+func (cluster *Cluster) IsFailoverQuorumActive() (bool, error) {
+	failoverQuorumAnnotation, ok := cluster.GetAnnotations()[utils.FailoverQuorumAnnotationName]
+	if !ok || failoverQuorumAnnotation == "" {
+		return false, nil
+	}
+
+	v, err := strconv.ParseBool(failoverQuorumAnnotation)
+	if err != nil {
+		return false, fmt.Errorf("failed to parse failover quorum annotation '%v': %v", failoverQuorumAnnotation, err)
+	}
+
+	return v, nil
+}
diff --git a/api/v1/cluster_types_test.go b/api/v1/cluster_funcs_test.go
similarity index 90%
rename from api/v1/cluster_types_test.go
rename to api/v1/cluster_funcs_test.go
index 03a92ae90a..5e92d4ea40 100644
--- a/api/v1/cluster_types_test.go
+++ b/api/v1/cluster_funcs_test.go
@@ -1,5 +1,6 @@
/*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
*/

package v1

@@ -21,12 +24,13 @@ import (
	"time"

	barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog"
+	"github.com/cloudnative-pg/machinery/pkg/stringset"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"

-	"github.com/cloudnative-pg/cloudnative-pg/pkg/stringset"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"

	.
"github.com/onsi/ginkgo/v2" @@ -451,7 +455,7 @@ var _ = Describe("external cluster list", func() { }) }) -var _ = Describe("look up for secrets", func() { +var _ = Describe("look up for secrets", Ordered, func() { cluster := Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "clustername", @@ -459,12 +463,12 @@ var _ = Describe("look up for secrets", func() { } // assertServiceNamesPresent returns the first missing service name encountered - assertServiceNamesPresent := func(data *stringset.Data, serviceName string) string { + assertServiceNamesPresent := func(data *stringset.Data, serviceName string, clusterDomain string) string { assertions := []string{ serviceName, fmt.Sprintf("%v.%v", serviceName, cluster.Namespace), fmt.Sprintf("%v.%v.svc", serviceName, cluster.Namespace), - fmt.Sprintf("%v.%v.svc.cluster.local", serviceName, cluster.Namespace), + fmt.Sprintf("%v.%v.svc.%s", serviceName, cluster.Namespace, clusterDomain), } for _, assertion := range assertions { if !data.Has(assertion) { @@ -478,25 +482,29 @@ var _ = Describe("look up for secrets", func() { It("retrieves client CA secret name", func() { Expect(cluster.GetClientCASecretName()).To(Equal("clustername-ca")) }) + It("retrieves server CA secret name", func() { Expect(cluster.GetServerCASecretName()).To(Equal("clustername-ca")) }) + It("retrieves replication secret name", func() { Expect(cluster.GetReplicationSecretName()).To(Equal("clustername-replication")) }) + It("retrieves replication secret name", func() { Expect(cluster.GetReplicationSecretName()).To(Equal("clustername-replication")) }) + It("retrieves all names needed to build a server CA certificate", func() { names := cluster.GetClusterAltDNSNames() Expect(names).To(HaveLen(12)) namesSet := stringset.From(names) Expect(namesSet.Len()).To(Equal(12)) - Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadWriteName())).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadWriteName(), "cluster.local")).To(BeEmpty(), "missing service name") - Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadName())).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadName(), "cluster.local")).To(BeEmpty(), "missing service name") - Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadOnlyName())).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadOnlyName(), "cluster.local")).To(BeEmpty(), "missing service name") }) @@ -515,9 +523,9 @@ var _ = Describe("look up for secrets", func() { It("should generate correctly the managed services names", func() { namesSet := stringset.From(cluster.GetClusterAltDNSNames()) Expect(namesSet.Len()).To(Equal(20)) - Expect(assertServiceNamesPresent(namesSet, "one")).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, "one", "cluster.local")).To(BeEmpty(), "missing service name") - Expect(assertServiceNamesPresent(namesSet, "two")).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, "two", "cluster.local")).To(BeEmpty(), "missing service name") }) @@ -530,9 +538,9 @@ var _ = Describe("look up for secrets", func() { Expect(namesSet.Len()).To(Equal(12)) Expect(namesSet.Has(cluster.GetServiceReadName())).To(BeFalse()) Expect(namesSet.Has(cluster.GetServiceReadOnlyName())).To(BeFalse()) - Expect(assertServiceNamesPresent(namesSet, "one")).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, "one", "cluster.local")).To(BeEmpty(), "missing service name") - Expect(assertServiceNamesPresent(namesSet, "two")).To(BeEmpty(), + 
Expect(assertServiceNamesPresent(namesSet, "two", "cluster.local")).To(BeEmpty(), "missing service name") }) }) @@ -769,20 +777,16 @@ var _ = Describe("A config map resource version", func() { var _ = Describe("PostgreSQL version detection", func() { tests := []struct { - imageName string - postgresVersion int + imageName string + postgresMajorVersion int }{ { "ghcr.io/cloudnative-pg/postgresql:14.0", - 140000, - }, - { - "ghcr.io/cloudnative-pg/postgresql:13.2", - 130002, + 14, }, { - "ghcr.io/cloudnative-pg/postgresql:9.6.3", - 90603, + "ghcr.io/cloudnative-pg/postgresql:17.4", + 17, }, } @@ -790,7 +794,7 @@ var _ = Describe("PostgreSQL version detection", func() { cluster := Cluster{} for _, test := range tests { cluster.Spec.ImageName = test.imageName - Expect(cluster.GetPostgresqlVersion()).To(Equal(test.postgresVersion)) + Expect(cluster.GetPostgresqlMajorVersion()).To(Equal(test.postgresMajorVersion)) } }) It("correctly extract PostgreSQL versions from ImageCatalogRef", func() { @@ -802,7 +806,32 @@ var _ = Describe("PostgreSQL version detection", func() { }, Major: 16, } - Expect(cluster.GetPostgresqlVersion()).To(Equal(160000)) + Expect(cluster.GetPostgresqlMajorVersion()).To(Equal(16)) + }) + + It("correctly prioritizes ImageCatalogRef over Status.Image and Spec.ImageName", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + ImageName: "ghcr.io/cloudnative-pg/postgresql:14.1", + ImageCatalogRef: &ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test-catalog", + Kind: "ImageCatalog", + }, + Major: 16, + }, + }, + Status: ClusterStatus{ + Image: "ghcr.io/cloudnative-pg/postgresql:15.2", + }, + } + + // ImageCatalogRef should take precedence + Expect(cluster.GetPostgresqlMajorVersion()).To(Equal(16)) + + // Remove Status.Image, Spec.ImageName should be used + cluster.Spec.ImageCatalogRef = nil + Expect(cluster.GetPostgresqlMajorVersion()).To(Equal(14)) }) }) @@ -1157,10 +1186,10 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { synchronizeReplicas = &SynchronizeReplicasConfiguration{} }) - Context("compileRegex", func() { + Context("CompileRegex", func() { It("should return no errors when SynchronizeReplicasConfiguration is nil", func() { synchronizeReplicas = nil - Expect(synchronizeReplicas.compileRegex()).To(BeEmpty()) + Expect(synchronizeReplicas.ValidateRegex()).ToNot(HaveOccurred()) }) Context("when SynchronizeReplicasConfiguration is not nil", func() { @@ -1169,7 +1198,7 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { }) It("should compile patterns without errors", func() { - Expect(synchronizeReplicas.compileRegex()).To(BeEmpty()) + Expect(synchronizeReplicas.ValidateRegex()).ToNot(HaveOccurred()) }) Context("when a pattern fails to compile", func() { @@ -1178,16 +1207,11 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { }) It("should return errors for the invalid pattern", func() { - errors := synchronizeReplicas.compileRegex() - Expect(errors).To(HaveLen(1)) + err := synchronizeReplicas.ValidateRegex() + Expect(err).To(HaveOccurred()) }) }) }) - - It("should return no errors on subsequent calls when compile is called multiple times", func() { - Expect(synchronizeReplicas.compileRegex()).To(BeEmpty()) - Expect(synchronizeReplicas.compileRegex()).To(BeEmpty()) - }) }) Context("GetEnabled", func() { @@ -1239,19 +1263,12 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { Expect(isExcludedByUser).To(BeTrue()) }) - It("should compile patterns before checking for exclusion when 
compile is not called", func() { - Expect(synchronizeReplicas.compiledPatterns).To(BeEmpty()) - isExcludedByUser, err := synchronizeReplicas.IsExcludedByUser("pattern1MatchingSlot") - Expect(err).ToNot(HaveOccurred()) - Expect(isExcludedByUser, err).To(BeTrue()) - Expect(synchronizeReplicas.compiledPatterns).To(HaveLen(2)) - }) - It("should return an error in case of an invalid pattern", func() { synchronizeReplicas.ExcludePatterns = []string{"([a-zA-Z]+"} isExcludedByUser, err := synchronizeReplicas.IsExcludedByUser("test") Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("error parsing regexp: missing closing ): `([a-zA-Z]+`")) + Expect(err.Error()).To(Equal("failed to compile regex patterns: error parsing regexp: " + + "missing closing ): `([a-zA-Z]+`; ")) Expect(isExcludedByUser).To(BeFalse()) }) }) @@ -1681,3 +1698,84 @@ var _ = Describe("UpdateBackupTimes", func() { To(Equal(now)) }) }) + +var _ = Describe("Probes configuration", func() { + originalProbe := corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt32(23), + }, + }, + + InitialDelaySeconds: 21, + PeriodSeconds: 11, + FailureThreshold: 433, + TerminationGracePeriodSeconds: ptr.To[int64](23), + } + + It("Does not change any field if the configuration is nil", func() { + var nilProbe *Probe + configuredProbe := originalProbe.DeepCopy() + nilProbe.ApplyInto(configuredProbe) + Expect(originalProbe).To(BeEquivalentTo(*configuredProbe)) + }) + + It("Changes the corresponding fields", func() { + config := &Probe{ + InitialDelaySeconds: 1, + TimeoutSeconds: 2, + PeriodSeconds: 3, + SuccessThreshold: 4, + FailureThreshold: 5, + TerminationGracePeriodSeconds: nil, + } + + configuredProbe := originalProbe.DeepCopy() + config.ApplyInto(configuredProbe) + Expect(configuredProbe.InitialDelaySeconds).To(Equal(config.InitialDelaySeconds)) + Expect(configuredProbe.TimeoutSeconds).To(Equal(config.TimeoutSeconds)) + Expect(configuredProbe.PeriodSeconds).To(Equal(config.PeriodSeconds)) + Expect(configuredProbe.SuccessThreshold).To(Equal(config.SuccessThreshold)) + Expect(configuredProbe.FailureThreshold).To(Equal(config.FailureThreshold)) + Expect(*configuredProbe.TerminationGracePeriodSeconds).To(BeEquivalentTo(23)) + }) + + It("should not overwrite any field", func() { + config := &Probe{} + configuredProbe := originalProbe.DeepCopy() + config.ApplyInto(configuredProbe) + Expect(originalProbe).To(BeEquivalentTo(*configuredProbe), + "configured probe should not be modified with zero values") + }) +}) + +var _ = Describe("Failover quorum annotation", func() { + clusterWithAnnotation := func(v string) *Cluster { + return &Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.FailoverQuorumAnnotationName: v, + }, + }, + } + } + + DescribeTable( + "annotation parsing", + func(cluster *Cluster, valueIsCorrect, expected bool) { + actual, err := cluster.IsFailoverQuorumActive() + if valueIsCorrect { + Expect(err).ToNot(HaveOccurred()) + } else { + Expect(err).To(HaveOccurred()) + } + Expect(actual).To(Equal(expected)) + }, + Entry("with no annotation", &Cluster{}, true, false), + Entry("with empty annotation", clusterWithAnnotation(""), true, false), + Entry("with true annotation", clusterWithAnnotation("t"), true, true), + Entry("with false annotation", clusterWithAnnotation("f"), true, false), + Entry("with invalid annotation", clusterWithAnnotation("xxx"), false, false), + ) +}) diff --git a/api/v1/cluster_types.go 
b/api/v1/cluster_types.go index 6f1d702621..94006080c9 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,50 +13,19 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 import ( - "context" - "fmt" - "regexp" - "slices" - "strconv" - "strings" - "time" - - machineryapi "github.com/cloudnative-pg/machinery/pkg/api" - "github.com/cloudnative-pg/machinery/pkg/log" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" - "github.com/cloudnative-pg/cloudnative-pg/pkg/system" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) -// LocalObjectReference contains enough information to let you locate a -// local object with a known type inside the same namespace -// +kubebuilder:object:generate:=false -type LocalObjectReference = machineryapi.LocalObjectReference - -// SecretKeySelector contains enough information to let you locate -// the key of a Secret -// +kubebuilder:object:generate:=false -type SecretKeySelector = machineryapi.SecretKeySelector - -// ConfigMapKeySelector contains enough information to let you locate -// the key of a ConfigMap -// +kubebuilder:object:generate:=false -type ConfigMapKeySelector = machineryapi.ConfigMapKeySelector - const ( // PrimaryPodDisruptionBudgetSuffix is the suffix appended to the cluster name // to get the name of the PDB used for the cluster primary @@ -118,11 +88,11 @@ const ( // streaming replication purposes StreamingReplicationUser = "streaming_replica" - // defaultPostgresUID is the default UID which is used by PostgreSQL - defaultPostgresUID = 26 + // DefaultPostgresUID is the default UID which is used by PostgreSQL + DefaultPostgresUID = 26 - // defaultPostgresGID is the default GID which is used by PostgreSQL - defaultPostgresGID = 26 + // DefaultPostgresGID is the default GID which is used by PostgreSQL + DefaultPostgresGID = 26 // PodAntiAffinityTypeRequired is the label for required anti-affinity type PodAntiAffinityTypeRequired = "required" @@ -142,6 +112,11 @@ const ( // MissingWALDiskSpaceExitCode is the exit code the instance manager // will use to signal that there's no more WAL disk space MissingWALDiskSpaceExitCode = 4 + + // MissingWALArchivePlugin is the exit code used by the instance manager + // to indicate that it started successfully, but the configured WAL + // archiving plugin is not available. + MissingWALArchivePlugin = 5 ) // SnapshotOwnerReference defines the reference type for the owner of the snapshot. 
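The exit codes above form a small contract between the instance manager and its supervisor. A hedged sketch of how a caller might branch on them (handleInstanceExit and the messages are hypothetical; assumes apiv1 = this package):

// Hypothetical sketch only: maps the documented instance manager exit
// codes to operator-side reactions.
func handleInstanceExit(code int) string {
	switch code {
	case apiv1.MissingWALDiskSpaceExitCode:
		return "no WAL disk space left: wait for space to be freed"
	case apiv1.MissingWALArchivePlugin:
		return "WAL archiving plugin unavailable: check the plugin deployment"
	default:
		return "generic failure: restart the instance"
	}
}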
@@ -195,16 +170,6 @@ type VolumeSnapshotConfiguration struct { OnlineConfiguration OnlineConfiguration `json:"onlineConfiguration,omitempty"` } -// GetOnline tells whether this volume snapshot configuration allows -// online backups -func (configuration *VolumeSnapshotConfiguration) GetOnline() bool { - if configuration.Online == nil { - return true - } - - return *configuration.Online -} - // OnlineConfiguration contains the configuration parameters for the online volume snapshot type OnlineConfiguration struct { // If false, the function will return immediately after the backup is completed, @@ -229,30 +194,11 @@ type OnlineConfiguration struct { ImmediateCheckpoint *bool `json:"immediateCheckpoint,omitempty"` } -// GetWaitForArchive tells whether to wait for archive or not -func (o OnlineConfiguration) GetWaitForArchive() bool { - if o.WaitForArchive == nil { - return true - } - - return *o.WaitForArchive -} - -// GetImmediateCheckpoint tells whether to execute an immediate checkpoint -func (o OnlineConfiguration) GetImmediateCheckpoint() bool { - if o.ImmediateCheckpoint == nil { - return false - } - - return *o.ImmediateCheckpoint -} - // ImageCatalogRef defines the reference to a major version in an ImageCatalog type ImageCatalogRef struct { // +kubebuilder:validation:XValidation:rule="self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'",message="Only image catalogs are supported" // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'postgresql.cnpg.io'",message="Only image catalogs are supported" corev1.TypedLocalObjectReference `json:",inline"` - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Major is immutable" // The major version of PostgreSQL we want to use from the ImageCatalog Major int `json:"major"` } @@ -441,6 +387,7 @@ type ClusterSpec struct { // EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral // volumes + // +optional EphemeralVolumesSizeLimit *EphemeralVolumesSizeLimitConfiguration `json:"ephemeralVolumesSizeLimit,omitempty"` // Name of the priority class which will be used in every generated Pod, if the PriorityClass @@ -532,23 +479,131 @@ type ClusterSpec struct { // The plugins configuration, containing // any plugin to be loaded with the corresponding configuration - Plugins PluginConfigurationList `json:"plugins,omitempty"` + // +optional + Plugins []PluginConfiguration `json:"plugins,omitempty"` + + // The configuration of the probes to be injected + // in the PostgreSQL Pods. + // +optional + Probes *ProbesConfiguration `json:"probes,omitempty"` +} + +// ProbesConfiguration represent the configuration for the probes +// to be injected in the PostgreSQL Pods +type ProbesConfiguration struct { + // The startup probe configuration + Startup *ProbeWithStrategy `json:"startup,omitempty"` + + // The liveness probe configuration + Liveness *LivenessProbe `json:"liveness,omitempty"` + + // The readiness probe configuration + Readiness *ProbeWithStrategy `json:"readiness,omitempty"` +} + +// ProbeWithStrategy is the configuration of the startup probe +type ProbeWithStrategy struct { + // Probe is the standard probe configuration + Probe `json:",inline"` + + // The probe strategy + // +kubebuilder:validation:Enum=pg_isready;streaming;query + // +optional + Type ProbeStrategyType `json:"type,omitempty"` + + // Lag limit. 
Used only for `streaming` strategy + // +optional + MaximumLag *resource.Quantity `json:"maximumLag,omitempty"` +} + +// ProbeStrategyType is the type of the strategy used to declare a PostgreSQL instance +// ready +type ProbeStrategyType string + +const ( + // ProbeStrategyPgIsReady means that the pg_isready tool is used to determine + // whether PostgreSQL is started up + ProbeStrategyPgIsReady ProbeStrategyType = "pg_isready" + + // ProbeStrategyStreaming means that pg_isready is positive and the replica is + // connected via streaming replication to the current primary and the lag is, if specified, + // within the limit. + ProbeStrategyStreaming ProbeStrategyType = "streaming" + + // ProbeStrategyQuery means that the server is able to connect to the superuser database + // and able to execute a simple query like "-- ping" + ProbeStrategyQuery ProbeStrategyType = "query" +) + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // Number of seconds after the container has started before liveness probes are initiated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"` + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + PeriodSeconds int32 `json:"periodSeconds,omitempty"` + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + // +optional + SuccessThreshold int32 `json:"successThreshold,omitempty"` + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + FailureThreshold int32 `json:"failureThreshold,omitempty"` + // Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + // value overrides the value provided by the pod spec. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down). + // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } -// PluginConfigurationList represent a set of plugin with their -// configuration parameters -type PluginConfigurationList []PluginConfiguration +// LivenessProbe is the configuration of the liveness probe +type LivenessProbe struct { + // Probe is the standard probe configuration + Probe `json:",inline"` -// GetEnabledPluginNames gets the name of the plugins that are involved -// in the reconciliation of this cluster -func (pluginList PluginConfigurationList) GetEnabledPluginNames() (result []string) { - pluginNames := make([]string, 0, len(pluginList)) - for _, pluginDeclaration := range pluginList { - if pluginDeclaration.IsEnabled() { - pluginNames = append(pluginNames, pluginDeclaration.Name) - } - } - return pluginNames + // Configure the feature that extends the liveness probe for a primary + // instance. In addition to the basic checks, this verifies whether the + // primary is isolated from the Kubernetes API server and from its + // replicas, ensuring that it can be safely shut down if network + // partition or API unavailability is detected. Enabled by default. + // +optional + IsolationCheck *IsolationCheckConfiguration `json:"isolationCheck,omitempty"` +} + +// IsolationCheckConfiguration contains the configuration for the isolation check +// functionality in the liveness probe +type IsolationCheckConfiguration struct { + // Whether primary isolation checking is enabled for the liveness probe + // +optional + // +kubebuilder:default:=true + Enabled *bool `json:"enabled,omitempty"` + + // Timeout in milliseconds for requests during the primary isolation check + // +optional + // +kubebuilder:default:=1000 + RequestTimeout int `json:"requestTimeout,omitempty"` + + // Timeout in milliseconds for connections during the primary isolation check + // +optional + // +kubebuilder:default:=1000 + ConnectionTimeout int `json:"connectionTimeout,omitempty"` } const ( @@ -567,6 +622,13 @@ const ( // PhaseUpgrade upgrade in process PhaseUpgrade = "Upgrading cluster" + // PhaseMajorUpgrade major version upgrade in process + PhaseMajorUpgrade = "Upgrading Postgres major version" + + // PhaseUpgradeDelayed is set when a cluster needs to be upgraded, + // but the operation is being delayed by the operator configuration + PhaseUpgradeDelayed = "Cluster upgrade delayed" + // PhaseWaitingForUser set the status to wait for an action from the user PhaseWaitingForUser = "Waiting for user action" @@ -583,12 +645,15 @@ const ( // loaded still PhaseUnknownPlugin = "Cluster cannot proceed to reconciliation due to an unknown plugin being required" + // PhaseFailurePlugin is triggered when the cluster cannot proceed to reconciliation due to an interaction failure + PhaseFailurePlugin = "Cluster cannot proceed to reconciliation due to an error while interacting with plugins" + // PhaseImageCatalogError is triggered when the cluster cannot select the image to // apply because of an invalid or incomplete catalog PhaseImageCatalogError = "Cluster has incomplete or invalid image catalog" // PhaseUnrecoverable for an unrecoverable cluster - PhaseUnrecoverable = "Cluster is in an unrecoverable state, needs manual intervention" + PhaseUnrecoverable = "Cluster is unrecoverable and needs manual intervention" // PhaseArchitectureBinaryMissing is the error phase describing a missing architecture PhaseArchitectureBinaryMissing = "Cluster cannot execute instance online upgrade due to missing architecture binary" @@ 
-614,30 +679,14 @@ const ( // storage type EphemeralVolumesSizeLimitConfiguration struct { // Shm is the size limit of the shared memory volume + // +optional Shm *resource.Quantity `json:"shm,omitempty"` // TemporaryData is the size limit of the temporary data volume + // +optional TemporaryData *resource.Quantity `json:"temporaryData,omitempty"` } -// GetShmLimit gets the `/dev/shm` memory size limit -func (e *EphemeralVolumesSizeLimitConfiguration) GetShmLimit() *resource.Quantity { - if e == nil { - return nil - } - - return e.Shm -} - -// GetTemporaryDataLimit gets the temporary storage size limit -func (e *EphemeralVolumesSizeLimitConfiguration) GetTemporaryDataLimit() *resource.Quantity { - if e == nil { - return nil - } - - return e.TemporaryData -} - // ServiceAccountTemplate contains the template needed to generate the service accounts type ServiceAccountTemplate struct { // Metadata are the metadata to be used for the generated @@ -645,37 +694,9 @@ type ServiceAccountTemplate struct { Metadata Metadata `json:"metadata"` } -// MergeMetadata adds the passed custom annotations and labels in the service account. -func (st *ServiceAccountTemplate) MergeMetadata(sa *corev1.ServiceAccount) { - if st == nil { - return - } - if sa.Labels == nil { - sa.Labels = map[string]string{} - } - if sa.Annotations == nil { - sa.Annotations = map[string]string{} - } - - utils.MergeMap(sa.Labels, st.Metadata.Labels) - utils.MergeMap(sa.Annotations, st.Metadata.Annotations) -} - // PodTopologyLabels represent the topology of a Pod. map[labelName]labelValue type PodTopologyLabels map[string]string -// MatchesTopology checks if the two topologies have -// the same label values (labels are specified in SyncReplicaElectionConstraints.NodeLabelsAntiAffinity) -func (topologyLabels PodTopologyLabels) MatchesTopology(instanceTopology PodTopologyLabels) bool { - log.Debug("matching topology", "main", topologyLabels, "second", instanceTopology) - for mainLabelName, mainLabelValue := range topologyLabels { - if mainLabelValue != instanceTopology[mainLabelName] { - return false - } - } - return true -} - // PodName is the name of a Pod type PodName string @@ -776,16 +797,6 @@ type AvailableArchitecture struct { Hash string `json:"hash"` } -// GetAvailableArchitecture returns an AvailableArchitecture given it's name. It returns nil if it's not found. -func (status *ClusterStatus) GetAvailableArchitecture(archName string) *AvailableArchitecture { - for _, architecture := range status.AvailableArchitectures { - if architecture.GoArch == archName { - return &architecture - } - } - return nil -} - // ClusterStatus defines the observed state of Cluster type ClusterStatus struct { // The total number of PVC Groups detected in the cluster. It may differ from the number of existing instance pods. 
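For reference, a minimal self-contained sketch of the label-by-label comparison the removed MatchesTopology helper performed (label names and values are hypothetical):

package main

import "fmt"

// matchesTopology mirrors the semantics of the removed helper: two
// topologies match when every label of the first has the same value
// in the second.
func matchesTopology(main, instance map[string]string) bool {
	for name, value := range main {
		if instance[name] != value {
			return false
		}
	}
	return true
}

func main() {
	a := map[string]string{"topology.kubernetes.io/zone": "zone-a"}
	b := map[string]string{"topology.kubernetes.io/zone": "zone-b"}
	fmt.Println(matchesTopology(a, b)) // false: different zones
}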
@@ -798,7 +809,7 @@ type ClusterStatus struct { // InstancesStatus indicates in which status the instances are // +optional - InstancesStatus map[utils.PodStatus][]string `json:"instancesStatus,omitempty"` + InstancesStatus map[PodStatus][]string `json:"instancesStatus,omitempty"` // The reported state of the instances during the last reconciliation loop // +optional @@ -835,6 +846,7 @@ type ClusterStatus struct { // LastPromotionToken is the last verified promotion token that // was used to promote a replica cluster + // +optional LastPromotionToken string `json:"lastPromotionToken,omitempty"` // How many PVCs have been created by this cluster @@ -901,24 +913,34 @@ type ClusterStatus struct { Certificates CertificatesStatus `json:"certificates,omitempty"` // The first recoverability point, stored as a date in RFC3339 format. - // This field is calculated from the content of FirstRecoverabilityPointByMethod + // This field is calculated from the content of FirstRecoverabilityPointByMethod. + // + // Deprecated: the field is not set for backup plugins. // +optional FirstRecoverabilityPoint string `json:"firstRecoverabilityPoint,omitempty"` - // The first recoverability point, stored as a date in RFC3339 format, per backup method type + // The first recoverability point, stored as a date in RFC3339 format, per backup method type. + // + // Deprecated: the field is not set for backup plugins. // +optional FirstRecoverabilityPointByMethod map[BackupMethod]metav1.Time `json:"firstRecoverabilityPointByMethod,omitempty"` - // Last successful backup, stored as a date in RFC3339 format - // This field is calculated from the content of LastSuccessfulBackupByMethod + // Last successful backup, stored as a date in RFC3339 format. + // This field is calculated from the content of LastSuccessfulBackupByMethod. + // + // Deprecated: the field is not set for backup plugins. // +optional LastSuccessfulBackup string `json:"lastSuccessfulBackup,omitempty"` - // Last successful backup, stored as a date in RFC3339 format, per backup method type + // Last successful backup, stored as a date in RFC3339 format, per backup method type. + // + // Deprecated: the field is not set for backup plugins. // +optional LastSuccessfulBackupByMethod map[BackupMethod]metav1.Time `json:"lastSuccessfulBackupByMethod,omitempty"` - // Stored as a date in RFC3339 format + // Last failed backup, stored as a date in RFC3339 format. + // + // Deprecated: the field is not set for backup plugins. // +optional LastFailedBackup string `json:"lastFailedBackup,omitempty"` @@ -963,15 +985,16 @@ type ClusterStatus struct { // +optional OnlineUpdateEnabled bool `json:"onlineUpdateEnabled,omitempty"` - // AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster - // +optional - AzurePVCUpdateEnabled bool `json:"azurePVCUpdateEnabled,omitempty"` - // Image contains the image name used by the pods // +optional Image string `json:"image,omitempty"` + // PGDataImageInfo contains the details of the latest image that has run on the current data directory. 
+	// +optional
+	PGDataImageInfo *ImageInfo `json:"pgDataImageInfo,omitempty"`
+
	// PluginStatus is the status of the loaded plugins
+	// +optional
	PluginStatus []PluginStatus `json:"pluginStatus,omitempty"`

	// SwitchReplicaClusterStatus is the status of the switch to replica cluster
@@ -984,6 +1007,18 @@
	// WAL file, and Time of latest checkpoint
	// +optional
	DemotionToken string `json:"demotionToken,omitempty"`
+
+	// SystemID is the latest detected PostgreSQL SystemID
+	// +optional
+	SystemID string `json:"systemID,omitempty"`
+}
+
+// ImageInfo contains information about a PostgreSQL image
+type ImageInfo struct {
+	// Image is the image name
+	Image string `json:"image"`
+	// MajorVersion is the major version of the image
+	MajorVersion int `json:"majorVersion"`
}

// SwitchReplicaClusterStatus contains all the statuses regarding the switch of a cluster to a replica cluster
@@ -1000,6 +1035,8 @@
	// indicates on which TimelineId the instance is
	// +optional
	TimeLineID int `json:"timeLineID,omitempty"`
+	// IP address of the instance
+	IP string `json:"ip,omitempty"`
}

// ClusterConditionType defines types of cluster conditions
@@ -1014,38 +1051,9 @@
	ConditionBackup ClusterConditionType = "LastBackupSucceeded"
	// ConditionClusterReady represents whether a cluster is Ready
	ConditionClusterReady ClusterConditionType = "Ready"
-)
-
-// A Condition that can be used to communicate the Backup progress
-var (
-	// BackupSucceededCondition is added to a backup
-	// when it was completed correctly
-	BackupSucceededCondition = &metav1.Condition{
-		Type:    string(ConditionBackup),
-		Status:  metav1.ConditionTrue,
-		Reason:  string(ConditionReasonLastBackupSucceeded),
-		Message: "Backup was successful",
-	}
-
-	// BackupStartingCondition is added to a backup
-	// when it started
-	BackupStartingCondition = &metav1.Condition{
-		Type:    string(ConditionBackup),
-		Status:  metav1.ConditionFalse,
-		Reason:  string(ConditionBackupStarted),
-		Message: "New Backup starting up",
-	}
-
-	// BuildClusterBackupFailedCondition builds
-	// ConditionReasonLastBackupFailed condition
-	BuildClusterBackupFailedCondition = func(err error) *metav1.Condition {
-		return &metav1.Condition{
-			Type:    string(ConditionBackup),
-			Status:  metav1.ConditionFalse,
-			Reason:  string(ConditionReasonLastBackupFailed),
-			Message: err.Error(),
-		}
-	}
+	// ConditionConsistentSystemID is true when all the instances of the
+	// cluster report the same System ID.
+	ConditionConsistentSystemID ClusterConditionType = "ConsistentSystemID"
)

// ConditionStatus defines conditions of resources
@@ -1122,10 +1130,12 @@ type PgBouncerIntegrationStatus struct {
type ReplicaClusterConfiguration struct {
	// Self defines the name of this cluster. It is used to determine if this is a primary
	// or a replica cluster, comparing it with `primary`
+	// +optional
	Self string `json:"self,omitempty"`

	// Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
	// topology specified in externalClusters
+	// +optional
	Primary string `json:"primary,omitempty"`

	// The name of the external cluster which is the replication origin
@@ -1136,10 +1146,12 @@
	// existing cluster. Replica cluster can be created from a recovery
	// object store or via streaming through pg_basebackup.
	// Refer to the Replica clusters page of the documentation for more information.
+ // +optional Enabled *bool `json:"enabled,omitempty"` // A demotion token generated by an external cluster used to // check if the promotion requirements are met. + // +optional PromotionToken string `json:"promotionToken,omitempty"` // When replica mode is enabled, this parameter allows you to replay @@ -1147,6 +1159,7 @@ type ReplicaClusterConfiguration struct { // time past the commit time. This provides an opportunity to correct // data loss errors. Note that when this parameter is set, a promotion // token cannot be used. + // +optional MinApplyDelay *metav1.Duration `json:"minApplyDelay,omitempty"` } @@ -1166,74 +1179,6 @@ type SynchronizeReplicasConfiguration struct { // List of regular expression patterns to match the names of replication slots to be excluded (by default empty) // +optional ExcludePatterns []string `json:"excludePatterns,omitempty"` - - synchronizeReplicasCache `json:"-"` -} - -// synchronizeReplicasCache contains the result of the regex compilation -// +kubebuilder:object:generate:=false -type synchronizeReplicasCache struct { - compiledPatterns []regexp.Regexp `json:"-"` - - compiled bool `json:"-"` - - compileErrors []error `json:"-"` -} - -// DeepCopyInto needs to be manually added for the controller-gen compiler to work correctly, given that it cannot -// generate the DeepCopyInto for the regexp type. -// The method is empty because we don't want to transfer the cache when invoking DeepCopyInto -func (receiver synchronizeReplicasCache) DeepCopyInto(*synchronizeReplicasCache) {} - -func (r *SynchronizeReplicasConfiguration) compileRegex() []error { - if r == nil { - return nil - } - if r.compiled { - return r.compileErrors - } - - var errs []error - for _, pattern := range r.ExcludePatterns { - re, err := regexp.Compile(pattern) - if err != nil { - errs = append(errs, err) - continue - } - r.compiledPatterns = append(r.compiledPatterns, *re) - } - - r.compiled = true - r.compileErrors = errs - return errs -} - -// GetEnabled returns false if synchronized replication slots are disabled, defaults to true -func (r *SynchronizeReplicasConfiguration) GetEnabled() bool { - if r != nil && r.Enabled != nil { - return *r.Enabled - } - return true -} - -// IsExcludedByUser returns if a replication slot should not be reconciled on the replicas -func (r *SynchronizeReplicasConfiguration) IsExcludedByUser(slotName string) (bool, error) { - if r == nil { - return false, nil - } - - // this is an unexpected issue, validation should happen at webhook level - if errs := r.compileRegex(); len(errs) > 0 { - return false, errs[0] - } - - for _, re := range r.compiledPatterns { - if re.MatchString(slotName) { - return true, nil - } - } - - return false, nil } // ReplicationSlotsConfiguration encapsulates the configuration @@ -1256,19 +1201,6 @@ type ReplicationSlotsConfiguration struct { SynchronizeReplicas *SynchronizeReplicasConfiguration `json:"synchronizeReplicas,omitempty"` } -// GetEnabled returns false if replication slots are disabled, default is true -func (r *ReplicationSlotsConfiguration) GetEnabled() bool { - return r.SynchronizeReplicas.GetEnabled() || r.HighAvailability.GetEnabled() -} - -// GetUpdateInterval returns the update interval, defaulting to DefaultReplicationSlotsUpdateInterval if empty -func (r *ReplicationSlotsConfiguration) GetUpdateInterval() time.Duration { - if r == nil || r.UpdateInterval <= 0 { - return DefaultReplicationSlotsUpdateInterval - } - return time.Duration(r.UpdateInterval) * time.Second -} - // ReplicationSlotsHAConfiguration 
encapsulates the configuration // of the replication slots that are automatically managed by // the operator to control the streaming replication connections @@ -1296,39 +1228,16 @@ type ReplicationSlotsHAConfiguration struct { // +kubebuilder:validation:Pattern=^[0-9a-z_]*$ // +optional SlotPrefix string `json:"slotPrefix,omitempty"` -} - -// GetSlotPrefix returns the HA slot prefix, defaulting to DefaultReplicationSlotsHASlotPrefix if empty -func (r *ReplicationSlotsHAConfiguration) GetSlotPrefix() string { - if r == nil || r.SlotPrefix == "" { - return DefaultReplicationSlotsHASlotPrefix - } - return r.SlotPrefix -} - -// GetSlotNameFromInstanceName returns the slot name, given the instance name. -// It returns an empty string if High Availability Replication Slots are disabled -func (r *ReplicationSlotsHAConfiguration) GetSlotNameFromInstanceName(instanceName string) string { - if r == nil || !r.GetEnabled() { - return "" - } - slotName := fmt.Sprintf( - "%s%s", - r.GetSlotPrefix(), - instanceName, - ) - sanitizedName := slotNameNegativeRegex.ReplaceAllString(strings.ToLower(slotName), "_") - - return sanitizedName -} - -// GetEnabled returns false if replication slots are disabled, default is true -func (r *ReplicationSlotsHAConfiguration) GetEnabled() bool { - if r != nil && r.Enabled != nil { - return *r.Enabled - } - return true + // When enabled, the operator automatically manages synchronization of logical + // decoding (replication) slots across high-availability clusters. + // + // Requires one of the following conditions: + // - PostgreSQL version 17 or later + // - PostgreSQL version < 17 with pg_failover_slots extension enabled + // + // +optional + SynchronizeLogicalDecoding bool `json:"synchronizeLogicalDecoding,omitempty"` } // KubernetesUpgradeStrategy tells the operator if the user want to @@ -1419,16 +1328,24 @@ const ( SynchronousReplicaConfigurationMethodAny = SynchronousReplicaConfigurationMethod("any") ) -// ToPostgreSQLConfigurationKeyword returns the contained value as a valid PostgreSQL parameter to be injected -// in the 'synchronous_standby_names' field -func (s SynchronousReplicaConfigurationMethod) ToPostgreSQLConfigurationKeyword() string { - return strings.ToUpper(string(s)) -} +// DataDurabilityLevel specifies how strictly to enforce synchronous replication +// when cluster instances are unavailable. Options are `required` or `preferred`. +type DataDurabilityLevel string + +const ( + // DataDurabilityLevelRequired means that data durability is strictly enforced + DataDurabilityLevelRequired DataDurabilityLevel = "required" + + // DataDurabilityLevelPreferred means that data durability is enforced + // only when healthy replicas are available + DataDurabilityLevelPreferred DataDurabilityLevel = "preferred" +) // SynchronousReplicaConfiguration contains the configuration of the // PostgreSQL synchronous replication feature. // Important: at this moment, also `.spec.minSyncReplicas` and `.spec.maxSyncReplicas` // need to be considered. 
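//
// A minimal sketch of a quorum-based configuration that favors availability,
// assuming the selection method is exposed as a `Method` field (any other
// required knobs of the stanza are elided); per the validation rule that
// follows, `preferred` data durability is only accepted while
// `standbyNamesPre` and `standbyNamesPost` stay empty:
//
//	sync := SynchronousReplicaConfiguration{
//		Method:         SynchronousReplicaConfigurationMethodAny,
//		DataDurability: DataDurabilityLevelPreferred,
//	}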
+// +kubebuilder:validation:XValidation:rule="self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) || self.standbyNamesPost.size()==0))",message="dataDurability set to 'preferred' requires empty 'standbyNamesPre' and empty 'standbyNamesPost'" type SynchronousReplicaConfiguration struct { // Method to select synchronous replication standbys from the listed // servers, accepting 'any' (quorum-based synchronous replication) or @@ -1458,6 +1375,18 @@ type SynchronousReplicaConfiguration struct { // only useful for priority-based synchronous replication). // +optional StandbyNamesPost []string `json:"standbyNamesPost,omitempty"` + + // If set to "required", data durability is strictly enforced. Write operations + // with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + // block if there are insufficient healthy replicas, ensuring data persistence. + // If set to "preferred", data durability is maintained when healthy replicas + // are available, but the required number of instances will adjust dynamically + // if replicas become unavailable. This setting relaxes strict durability enforcement + // to allow for operational continuity. This setting is only applicable if both + // `standbyNamesPre` and `standbyNamesPost` are unset (empty). + // +kubebuilder:validation:Enum=required;preferred + // +optional + DataDurability DataDurabilityLevel `json:"dataDurability,omitempty"` } // PostgresConfiguration defines the PostgreSQL configuration @@ -1505,6 +1434,37 @@ type PostgresConfiguration struct { // Defaults to false. // +optional EnableAlterSystem bool `json:"enableAlterSystem,omitempty"` + + // The configuration of the extensions to be added + // +optional + Extensions []ExtensionConfiguration `json:"extensions,omitempty"` +} + +// ExtensionConfiguration is the configuration used to add +// PostgreSQL extensions to the Cluster. +type ExtensionConfiguration struct { + // The name of the extension, required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$` + Name string `json:"name"` + + // The image containing the extension, required + // +kubebuilder:validation:XValidation:rule="has(self.reference)",message="An image reference is required" + ImageVolumeSource corev1.ImageVolumeSource `json:"image"` + + // The list of directories inside the image which should be added to extension_control_path. + // If not defined, defaults to "/share". + // +optional + ExtensionControlPath []string `json:"extension_control_path,omitempty"` + + // The list of directories inside the image which should be added to dynamic_library_path. + // If not defined, defaults to "/lib". + // +optional + DynamicLibraryPath []string `json:"dynamic_library_path,omitempty"` + + // The list of directories inside the image which should be added to ld_library_path. + // +optional + LdLibraryPath []string `json:"ld_library_path,omitempty"` } // BootstrapConfiguration contains information about how to create the PostgreSQL @@ -1640,14 +1600,6 @@ type CertificatesConfiguration struct { ServerAltDNSNames []string `json:"serverAltDNSNames,omitempty"` } -func (c *CertificatesConfiguration) getServerAltDNSNames() []string { - if c == nil { - return nil - } - - return c.ServerAltDNSNames -} - // CertificatesStatus contains configuration certificates and related expiration dates. 
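//
// A hedged sketch of declaring an extension through the new stanza, assuming
// corev1 is k8s.io/api/core/v1 and a hypothetical image reference; with the
// path fields omitted, the documented defaults ("/share" and "/lib") apply:
//
//	ext := ExtensionConfiguration{
//		Name: "vector",
//		ImageVolumeSource: corev1.ImageVolumeSource{
//			Reference: "ghcr.io/example/pgvector:latest", // hypothetical
//		},
//	}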
type CertificatesStatus struct { // Needed configurations to handle server certificates, initialized with default values, if needed. @@ -1661,6 +1613,9 @@ type CertificatesStatus struct { // BootstrapInitDB is the configuration of the bootstrap process when // initdb is used // Refer to the Bootstrap page of the documentation for more information. +// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`" +// +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`" +// +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`" type BootstrapInitDB struct { // Name of the database used by the application. Default: `app`. // +optional @@ -1701,6 +1656,33 @@ type BootstrapInitDB struct { // +optional LocaleCType string `json:"localeCType,omitempty"` + // Sets the default collation order and character classification in the new database. + // +optional + Locale string `json:"locale,omitempty"` + + // This option sets the locale provider for databases created in the new cluster. + // Available from PostgreSQL 16. + // +optional + LocaleProvider string `json:"localeProvider,omitempty"` + + // Specifies the ICU locale when the ICU provider is used. + // This option requires `localeProvider` to be set to `icu`. + // Available from PostgreSQL 15. + // +optional + IcuLocale string `json:"icuLocale,omitempty"` + + // Specifies additional collation rules to customize the behavior of the default collation. + // This option requires `localeProvider` to be set to `icu`. + // Available from PostgreSQL 16. + // +optional + IcuRules string `json:"icuRules,omitempty"` + + // Specifies the locale name when the builtin provider is used. + // This option requires `localeProvider` to be set to `builtin`. + // Available from PostgreSQL 17. + // +optional + BuiltinLocale string `json:"builtinLocale,omitempty"` + // The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` // option for initdb (default: empty, resulting in PostgreSQL default: 16MB) // +kubebuilder:validation:Minimum=1 @@ -1799,6 +1781,20 @@ type Import struct { // `pg_restore` are invoked, avoiding data import. Default: `false`. // +optional SchemaOnly bool `json:"schemaOnly,omitempty"` + + // List of custom options to pass to the `pg_dump` command. IMPORTANT: + // Use these options with caution and at your own risk, as the operator + // does not validate their content. Be aware that certain options may + // conflict with the operator's intended functionality or design. + // +optional + PgDumpExtraOptions []string `json:"pgDumpExtraOptions,omitempty"` + + // List of custom options to pass to the `pg_restore` command. IMPORTANT: + // Use these options with caution and at your own risk, as the operator + // does not validate their content. Be aware that certain options may + // conflict with the operator's intended functionality or design. 
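//
// A cautious sketch of these pass-through options on an initdb `import`
// stanza (only plain pg_dump/pg_restore switches are shown; the required
// fields of the stanza are elided):
//
//	imp := Import{
//		PgDumpExtraOptions:    []string{"--verbose"},
//		PgRestoreExtraOptions: []string{"--jobs=2"},
//	}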
+ // +optional + PgRestoreExtraOptions []string `json:"pgRestoreExtraOptions,omitempty"` } // ImportSource describes the source for the logical snapshot @@ -1822,16 +1818,6 @@ type SQLRefs struct { ConfigMapRefs []ConfigMapKeySelector `json:"configMapRefs,omitempty"` } -// HasElements returns true if it contains any Reference -func (s *SQLRefs) HasElements() bool { - if s == nil { - return false - } - - return len(s.ConfigMapRefs) != 0 || - len(s.SecretRefs) != 0 -} - // BootstrapRecovery contains the configuration required to restore // from an existing cluster using 3 methodologies: external cluster, // volume snapshots or backup objects. Full recovery and Point-In-Time @@ -1981,26 +1967,6 @@ type RecoveryTarget struct { Exclusive *bool `json:"exclusive,omitempty"` } -// GetBackupID gets the backup ID -func (target *RecoveryTarget) GetBackupID() string { - return target.BackupID -} - -// GetTargetTime gets the target time -func (target *RecoveryTarget) GetTargetTime() string { - return target.TargetTime -} - -// GetTargetLSN gets the target LSN -func (target *RecoveryTarget) GetTargetLSN() string { - return target.TargetLSN -} - -// GetTargetTLI gets the target timeline -func (target *RecoveryTarget) GetTargetTLI() string { - return target.TargetTLI -} - // StorageConfiguration is the configuration used to create and reconcile PVCs, // usable for WAL volumes, PGDATA volumes, or tablespaces type StorageConfiguration struct { @@ -2027,28 +1993,6 @@ type StorageConfiguration struct { PersistentVolumeClaimTemplate *corev1.PersistentVolumeClaimSpec `json:"pvcTemplate,omitempty"` } -// GetSizeOrNil returns the requests storage size -func (s *StorageConfiguration) GetSizeOrNil() *resource.Quantity { - if s == nil { - return nil - } - - if s.Size != "" { - quantity, err := resource.ParseQuantity(s.Size) - if err != nil { - return nil - } - - return &quantity - } - - if s.PersistentVolumeClaimTemplate != nil { - return s.PersistentVolumeClaimTemplate.Resources.Requests.Storage() - } - - return nil -} - // TablespaceConfiguration is the configuration of a tablespace, and includes // the storage specification for the tablespace type TablespaceConfiguration struct { @@ -2141,17 +2085,6 @@ type AffinityConfiguration struct { AdditionalPodAffinity *corev1.PodAffinity `json:"additionalPodAffinity,omitempty"` } -// RollingUpdateStatus contains the information about an instance which is -// being updated -type RollingUpdateStatus struct { - // The image which we put into the Pod - ImageName string `json:"imageName"` - - // When the update has been started - // +optional - StartedAt metav1.Time `json:"startedAt,omitempty"` -} - // BackupTarget describes the preferred targets for a backup type BackupTarget string @@ -2235,11 +2168,6 @@ type MonitoringConfiguration struct { PodMonitorRelabelConfigs []monitoringv1.RelabelConfig `json:"podMonitorRelabelings,omitempty"` } -// AreDefaultQueriesDisabled checks whether default monitoring queries should be disabled -func (m *MonitoringConfiguration) AreDefaultQueriesDisabled() bool { - return m != nil && m.DisableDefaultQueries != nil && *m.DisableDefaultQueries -} - // ClusterMonitoringTLSConfiguration is the type containing the TLS configuration // for the cluster's monitoring type ClusterMonitoringTLSConfiguration struct { @@ -2288,15 +2216,10 @@ type ExternalCluster struct { // The configuration for the barman-cloud tool suite // +optional BarmanObjectStore *BarmanObjectStoreConfiguration `json:"barmanObjectStore,omitempty"` -} -// GetServerName returns the 
server name, defaulting to the name of the external cluster or using the one specified -// in the BarmanObjectStore -func (in ExternalCluster) GetServerName() string { - if in.BarmanObjectStore != nil && in.BarmanObjectStore.ServerName != "" { - return in.BarmanObjectStore.ServerName - } - return in.Name + // The configuration of the plugin that takes care + // of WAL archiving and backups for this external cluster + PluginConfiguration *PluginConfiguration `json:"plugin,omitempty"` } // EnsureOption represents whether we should enforce the presence or absence of @@ -2342,6 +2265,7 @@ type ManagedServices struct { // +optional DisabledDefaultServices []ServiceSelectorType `json:"disabledDefaultServices,omitempty"` // Additional is a list of additional managed services specified by the user. + // +optional Additional []ManagedService `json:"additional,omitempty"` } @@ -2350,11 +2274,11 @@ type ManagedServices struct { type ManagedService struct { // SelectorType specifies the type of selectors that the service will have. // Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. - +kubebuilder:validation:Enum=rw;r;ro SelectorType ServiceSelectorType `json:"selectorType"` // UpdateStrategy describes how the service differences should be reconciled // +kubebuilder:default:="patch" + // +optional UpdateStrategy ServiceUpdateStrategy `json:"updateStrategy,omitempty"` // ServiceTemplate is the template specification for the service. @@ -2383,18 +2307,17 @@ type PluginConfiguration struct { // +optional Enabled *bool `json:"enabled,omitempty"` + // Only one plugin can be declared as WALArchiver. + // Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + // +kubebuilder:default:=false + // +optional + IsWALArchiver *bool `json:"isWALArchiver,omitempty"` + // Parameters is the configuration of the plugin + // +optional Parameters map[string]string `json:"parameters,omitempty"` } -// IsEnabled returns true when this plugin is enabled -func (config *PluginConfiguration) IsEnabled() bool { - if config.Enabled == nil { - return true - } - return *config.Enabled -} - // PluginStatus is the status of a loaded plugin type PluginStatus struct { // Name is the name of the plugin @@ -2406,21 +2329,31 @@ type PluginStatus struct { // Capabilities are the list of capabilities of the // plugin + // +optional Capabilities []string `json:"capabilities,omitempty"` // OperatorCapabilities are the list of capabilities of the // plugin regarding the reconciler + // +optional OperatorCapabilities []string `json:"operatorCapabilities,omitempty"` // WALCapabilities are the list of capabilities of the // plugin regarding the WAL management + // +optional WALCapabilities []string `json:"walCapabilities,omitempty"` // BackupCapabilities are the list of capabilities of the // plugin regarding the Backup management + // +optional BackupCapabilities []string `json:"backupCapabilities,omitempty"` + // RestoreJobHookCapabilities are the list of capabilities of the + // plugin regarding the RestoreJobHook management + // +optional + RestoreJobHookCapabilities []string `json:"restoreJobHookCapabilities,omitempty"` + // Status contains the status reported by the plugin through the SetStatusInCluster interface + // +optional Status string `json:"status,omitempty"` } @@ -2516,22 +2449,6 @@ type RoleConfiguration struct { BypassRLS bool `json:"bypassrls,omitempty"` // Row-Level Security } -// GetRoleSecretsName gets the name of the secret which is used to store the
role's password -func (roleConfiguration *RoleConfiguration) GetRoleSecretsName() string { - if roleConfiguration.PasswordSecret != nil { - return roleConfiguration.PasswordSecret.Name - } - return "" -} - -// GetRoleInherit return the inherit attribute of a roleConfiguration -func (roleConfiguration *RoleConfiguration) GetRoleInherit() bool { - if roleConfiguration.Inherit != nil { - return *roleConfiguration.Inherit - } - return true -} - // +genclient // +kubebuilder:object:root=true // +kubebuilder:storageversion @@ -2565,6 +2482,7 @@ type ClusterList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of clusters Items []Cluster `json:"items"` @@ -2628,1073 +2546,6 @@ type ConfigMapResourceVersion struct { Metrics map[string]string `json:"metrics,omitempty"` } -// SetManagedRoleSecretVersion Add or update or delete the resource version of the managed role secret -func (secretResourceVersion *SecretsResourceVersion) SetManagedRoleSecretVersion(secret string, version *string) { - if secretResourceVersion.ManagedRoleSecretVersions == nil { - secretResourceVersion.ManagedRoleSecretVersions = make(map[string]string) - } - if version == nil { - delete(secretResourceVersion.ManagedRoleSecretVersions, secret) - } else { - secretResourceVersion.ManagedRoleSecretVersions[secret] = *version - } -} - -// SetExternalClusterSecretVersion Add or update or delete the resource version of the secret used in external clusters -func (secretResourceVersion *SecretsResourceVersion) SetExternalClusterSecretVersion( - secretName string, - version *string, -) { - if secretResourceVersion.ExternalClusterSecretVersions == nil { - secretResourceVersion.ExternalClusterSecretVersions = make(map[string]string) - } - - if version == nil { - delete(secretResourceVersion.ExternalClusterSecretVersions, secretName) - return - } - - secretResourceVersion.ExternalClusterSecretVersions[secretName] = *version -} - -// SetInContext records the cluster in the given context -func (cluster *Cluster) SetInContext(ctx context.Context) context.Context { - return context.WithValue(ctx, utils.ContextKeyCluster, cluster) -} - -// GetImageName get the name of the image that should be used -// to create the pods -func (cluster *Cluster) GetImageName() string { - // If the image is specified in the status, use that one - // It should be there since the first reconciliation - if len(cluster.Status.Image) > 0 { - return cluster.Status.Image - } - - // Fallback to the information we have in the spec - if len(cluster.Spec.ImageName) > 0 { - return cluster.Spec.ImageName - } - - // TODO: check: does a scenario exists in which we do have an imageCatalog - // and no status.image? In that case this should probably error out, not - // returning the default image name. - return configuration.Current.PostgresImageName -} - -// GetPostgresqlVersion gets the PostgreSQL image version detecting it from the -// image name or from the ImageCatalogRef. 
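//
// The integer form mirrors PostgreSQL's server_version_num convention; as a
// sketch of the arithmetic (two-part tags from v10 onward, three parts
// before):
//
//	version := major*10000 + minor // "14.0" -> 140000, "13.2" -> 130002
//	legacy := 9*10000 + 6*100 + 3  // "9.6.3" -> 90603
//
// which the examples below illustrate.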
-// Example: -// -// ghcr.io/cloudnative-pg/postgresql:14.0 corresponds to version 140000 -// ghcr.io/cloudnative-pg/postgresql:13.2 corresponds to version 130002 -// ghcr.io/cloudnative-pg/postgresql:9.6.3 corresponds to version 90603 -func (cluster *Cluster) GetPostgresqlVersion() (int, error) { - if cluster.Spec.ImageCatalogRef != nil { - return postgres.GetPostgresVersionFromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major)) - } - - image := cluster.GetImageName() - tag := utils.GetImageTag(image) - return postgres.GetPostgresVersionFromTag(tag) -} - -// GetPostgresqlMajorVersion gets the PostgreSQL image major version used in the Cluster -func (cluster *Cluster) GetPostgresqlMajorVersion() (int, error) { - version, err := cluster.GetPostgresqlVersion() - if err != nil { - return 0, err - } - return postgres.GetPostgresMajorVersion(version), nil -} - -// GetImagePullSecret get the name of the pull secret to use -// to download the PostgreSQL image -func (cluster *Cluster) GetImagePullSecret() string { - return cluster.Name + ClusterSecretSuffix -} - -// GetSuperuserSecretName get the secret name of the PostgreSQL superuser -func (cluster *Cluster) GetSuperuserSecretName() string { - if cluster.Spec.SuperuserSecret != nil && - cluster.Spec.SuperuserSecret.Name != "" { - return cluster.Spec.SuperuserSecret.Name - } - - return fmt.Sprintf("%v%v", cluster.Name, SuperUserSecretSuffix) -} - -// GetEnableLDAPAuth return true if bind or bind+search method are -// configured in the cluster configuration -func (cluster *Cluster) GetEnableLDAPAuth() bool { - if cluster.Spec.PostgresConfiguration.LDAP != nil && - (cluster.Spec.PostgresConfiguration.LDAP.BindAsAuth != nil || - cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth != nil) { - return true - } - return false -} - -// GetLDAPSecretName gets the secret name containing the LDAP password -func (cluster *Cluster) GetLDAPSecretName() string { - if cluster.Spec.PostgresConfiguration.LDAP != nil && - cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth != nil && - cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth.BindPassword != nil { - return cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth.BindPassword.Name - } - return "" -} - -// ContainsManagedRolesConfiguration returns true iff there are managed roles configured -func (cluster *Cluster) ContainsManagedRolesConfiguration() bool { - return cluster.Spec.Managed != nil && len(cluster.Spec.Managed.Roles) > 0 -} - -// GetExternalClusterSecrets returns the secrets used by external Clusters -func (cluster *Cluster) GetExternalClusterSecrets() *stringset.Data { - secrets := stringset.New() - - if cluster.Spec.ExternalClusters != nil { - for _, externalCluster := range cluster.Spec.ExternalClusters { - if externalCluster.Password != nil { - secrets.Put(externalCluster.Password.Name) - } - if externalCluster.SSLKey != nil { - secrets.Put(externalCluster.SSLKey.Name) - } - if externalCluster.SSLCert != nil { - secrets.Put(externalCluster.SSLCert.Name) - } - if externalCluster.SSLRootCert != nil { - secrets.Put(externalCluster.SSLRootCert.Name) - } - } - } - return secrets -} - -// UsesSecretInManagedRoles checks if the given secret name is used in a managed role -func (cluster *Cluster) UsesSecretInManagedRoles(secretName string) bool { - if !cluster.ContainsManagedRolesConfiguration() { - return false - } - for _, role := range cluster.Spec.Managed.Roles { - if role.PasswordSecret != nil && role.PasswordSecret.Name == secretName { - return true - } - } - return false -} - -// 
GetApplicationSecretName get the name of the application secret for any bootstrap type -func (cluster *Cluster) GetApplicationSecretName() string { - bootstrap := cluster.Spec.Bootstrap - if bootstrap == nil { - return fmt.Sprintf("%v%v", cluster.Name, ApplicationUserSecretSuffix) - } - recovery := bootstrap.Recovery - if recovery != nil && recovery.Secret != nil && recovery.Secret.Name != "" { - return recovery.Secret.Name - } - - pgBaseBackup := bootstrap.PgBaseBackup - if pgBaseBackup != nil && pgBaseBackup.Secret != nil && pgBaseBackup.Secret.Name != "" { - return pgBaseBackup.Secret.Name - } - - initDB := bootstrap.InitDB - if initDB != nil && initDB.Secret != nil && initDB.Secret.Name != "" { - return initDB.Secret.Name - } - - return fmt.Sprintf("%v%v", cluster.Name, ApplicationUserSecretSuffix) -} - -// GetApplicationDatabaseName get the name of the application database for a specific bootstrap -func (cluster *Cluster) GetApplicationDatabaseName() string { - bootstrap := cluster.Spec.Bootstrap - if bootstrap == nil { - return "" - } - - if bootstrap.Recovery != nil && bootstrap.Recovery.Database != "" { - return bootstrap.Recovery.Database - } - - if bootstrap.PgBaseBackup != nil && bootstrap.PgBaseBackup.Database != "" { - return bootstrap.PgBaseBackup.Database - } - - if bootstrap.InitDB != nil && bootstrap.InitDB.Database != "" { - return bootstrap.InitDB.Database - } - - return "" -} - -// GetApplicationDatabaseOwner get the owner user of the application database for a specific bootstrap -func (cluster *Cluster) GetApplicationDatabaseOwner() string { - bootstrap := cluster.Spec.Bootstrap - if bootstrap == nil { - return "" - } - - if bootstrap.Recovery != nil && bootstrap.Recovery.Owner != "" { - return bootstrap.Recovery.Owner - } - - if bootstrap.PgBaseBackup != nil && bootstrap.PgBaseBackup.Owner != "" { - return bootstrap.PgBaseBackup.Owner - } - - if bootstrap.InitDB != nil && bootstrap.InitDB.Owner != "" { - return bootstrap.InitDB.Owner - } - - return "" -} - -// GetServerCASecretName get the name of the secret containing the CA -// of the cluster -func (cluster *Cluster) GetServerCASecretName() string { - if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ServerCASecret != "" { - return cluster.Spec.Certificates.ServerCASecret - } - return fmt.Sprintf("%v%v", cluster.Name, DefaultServerCaSecretSuffix) -} - -// GetServerTLSSecretName get the name of the secret containing the -// certificate that is used for the PostgreSQL servers -func (cluster *Cluster) GetServerTLSSecretName() string { - if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ServerTLSSecret != "" { - return cluster.Spec.Certificates.ServerTLSSecret - } - return fmt.Sprintf("%v%v", cluster.Name, ServerSecretSuffix) -} - -// GetClientCASecretName get the name of the secret containing the CA -// of the cluster -func (cluster *Cluster) GetClientCASecretName() string { - if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ClientCASecret != "" { - return cluster.Spec.Certificates.ClientCASecret - } - return fmt.Sprintf("%v%v", cluster.Name, ClientCaSecretSuffix) -} - -// GetFixedInheritedAnnotations gets the annotations that should be -// inherited by all resources according the cluster spec -func (cluster *Cluster) GetFixedInheritedAnnotations() map[string]string { - if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Annotations == nil { - return nil - } - return cluster.Spec.InheritedMetadata.Annotations -} - -// GetFixedInheritedLabels gets the 
labels that should be -// inherited by all resources according the cluster spec -func (cluster *Cluster) GetFixedInheritedLabels() map[string]string { - if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Labels == nil { - return nil - } - return cluster.Spec.InheritedMetadata.Labels -} - -// GetReplicationSecretName get the name of the secret for the replication user -func (cluster *Cluster) GetReplicationSecretName() string { - if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ReplicationTLSSecret != "" { - return cluster.Spec.Certificates.ReplicationTLSSecret - } - return fmt.Sprintf("%v%v", cluster.Name, ReplicationSecretSuffix) -} - -// GetServiceAnyName return the name of the service that is used as DNS -// domain for all the nodes, even if they are not ready -func (cluster *Cluster) GetServiceAnyName() string { - return fmt.Sprintf("%v%v", cluster.Name, ServiceAnySuffix) -} - -// GetServiceReadName return the default name of the service that is used for -// read transactions (including the primary) -func (cluster *Cluster) GetServiceReadName() string { - return fmt.Sprintf("%v%v", cluster.Name, ServiceReadSuffix) -} - -// GetServiceReadOnlyName return the default name of the service that is used for -// read-only transactions (excluding the primary) -func (cluster *Cluster) GetServiceReadOnlyName() string { - return fmt.Sprintf("%v%v", cluster.Name, ServiceReadOnlySuffix) -} - -// GetServiceReadWriteName return the default name of the service that is used for -// read-write transactions -func (cluster *Cluster) GetServiceReadWriteName() string { - return fmt.Sprintf("%v%v", cluster.Name, ServiceReadWriteSuffix) -} - -// GetMaxStartDelay get the amount of time of startDelay config option -func (cluster *Cluster) GetMaxStartDelay() int32 { - if cluster.Spec.MaxStartDelay > 0 { - return cluster.Spec.MaxStartDelay - } - return DefaultStartupDelay -} - -// GetMaxStopDelay get the amount of time PostgreSQL has to stop -func (cluster *Cluster) GetMaxStopDelay() int32 { - if cluster.Spec.MaxStopDelay > 0 { - return cluster.Spec.MaxStopDelay - } - return 1800 -} - -// GetSmartShutdownTimeout is used to ensure that smart shutdown timeout is a positive integer -func (cluster *Cluster) GetSmartShutdownTimeout() int32 { - if cluster.Spec.SmartShutdownTimeout != nil { - return *cluster.Spec.SmartShutdownTimeout - } - return 180 -} - -// GetRestartTimeout is used to have a timeout for operations that involve -// a restart of a PostgreSQL instance -func (cluster *Cluster) GetRestartTimeout() int32 { - return cluster.GetMaxStopDelay() + cluster.GetMaxStartDelay() -} - -// GetMaxSwitchoverDelay get the amount of time PostgreSQL has to stop before switchover -func (cluster *Cluster) GetMaxSwitchoverDelay() int32 { - if cluster.Spec.MaxSwitchoverDelay > 0 { - return cluster.Spec.MaxSwitchoverDelay - } - return DefaultMaxSwitchoverDelay -} - -// GetPrimaryUpdateStrategy get the cluster primary update strategy, -// defaulting to unsupervised -func (cluster *Cluster) GetPrimaryUpdateStrategy() PrimaryUpdateStrategy { - strategy := cluster.Spec.PrimaryUpdateStrategy - if strategy == "" { - return PrimaryUpdateStrategyUnsupervised - } - - return strategy -} - -// GetPrimaryUpdateMethod get the cluster primary update method, -// defaulting to restart -func (cluster *Cluster) GetPrimaryUpdateMethod() PrimaryUpdateMethod { - strategy := cluster.Spec.PrimaryUpdateMethod - if strategy == "" { - return PrimaryUpdateMethodRestart - } - - return strategy -} - -// GetEnablePDB 
get the cluster EnablePDB value, defaults to true -func (cluster *Cluster) GetEnablePDB() bool { - if cluster.Spec.EnablePDB == nil { - return true - } - - return *cluster.Spec.EnablePDB -} - -// IsNodeMaintenanceWindowInProgress check if the upgrade mode is active or not -func (cluster *Cluster) IsNodeMaintenanceWindowInProgress() bool { - return cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.InProgress -} - -// GetPgCtlTimeoutForPromotion returns the timeout that should be waited for an instance to be promoted -// to primary. As default, DefaultPgCtlTimeoutForPromotion is big enough to simulate an infinite timeout -func (cluster *Cluster) GetPgCtlTimeoutForPromotion() int32 { - timeout := cluster.Spec.PostgresConfiguration.PgCtlTimeoutForPromotion - if timeout == 0 { - return DefaultPgCtlTimeoutForPromotion - } - return timeout -} - -// IsReusePVCEnabled check if in a maintenance window we should reuse PVCs -func (cluster *Cluster) IsReusePVCEnabled() bool { - reusePVC := true - if cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.ReusePVC != nil { - reusePVC = *cluster.Spec.NodeMaintenanceWindow.ReusePVC - } - return reusePVC -} - -// IsInstanceFenced check if in a given instance should be fenced -func (cluster *Cluster) IsInstanceFenced(instance string) bool { - fencedInstances, err := utils.GetFencedInstances(cluster.Annotations) - if err != nil { - return false - } - - if fencedInstances.Has(utils.FenceAllInstances) { - return true - } - return fencedInstances.Has(instance) -} - -// ShouldResizeInUseVolumes is true when we should resize PVC we already -// created -func (cluster *Cluster) ShouldResizeInUseVolumes() bool { - if cluster.Spec.StorageConfiguration.ResizeInUseVolumes == nil { - return true - } - - return *cluster.Spec.StorageConfiguration.ResizeInUseVolumes -} - -// ShouldCreateApplicationSecret returns true if for this cluster, -// during the bootstrap phase, we need to create a secret to store application credentials -func (cluster *Cluster) ShouldCreateApplicationSecret() bool { - return cluster.ShouldInitDBCreateApplicationSecret() || - cluster.ShouldPgBaseBackupCreateApplicationSecret() || - cluster.ShouldRecoveryCreateApplicationSecret() -} - -// ShouldInitDBCreateApplicationSecret returns true if for this cluster, -// during the bootstrap phase using initDB, we need to create an new application secret -func (cluster *Cluster) ShouldInitDBCreateApplicationSecret() bool { - return cluster.ShouldInitDBCreateApplicationDatabase() && - (cluster.Spec.Bootstrap.InitDB.Secret == nil || - cluster.Spec.Bootstrap.InitDB.Secret.Name == "") -} - -// ShouldPgBaseBackupCreateApplicationSecret returns true if for this cluster, -// during the bootstrap phase using pg_basebackup, we need to create an application secret -func (cluster *Cluster) ShouldPgBaseBackupCreateApplicationSecret() bool { - return cluster.ShouldPgBaseBackupCreateApplicationDatabase() && - (cluster.Spec.Bootstrap.PgBaseBackup.Secret == nil || - cluster.Spec.Bootstrap.PgBaseBackup.Secret.Name == "") -} - -// ShouldRecoveryCreateApplicationSecret returns true if for this cluster, -// during the bootstrap phase using recovery, we need to create an application secret -func (cluster *Cluster) ShouldRecoveryCreateApplicationSecret() bool { - return cluster.ShouldRecoveryCreateApplicationDatabase() && - (cluster.Spec.Bootstrap.Recovery.Secret == nil || - cluster.Spec.Bootstrap.Recovery.Secret.Name == "") -} - -// ShouldCreateApplicationDatabase returns true 
if for this cluster, -// during the bootstrap phase, we need to create an application database -func (cluster *Cluster) ShouldCreateApplicationDatabase() bool { - return cluster.ShouldInitDBCreateApplicationDatabase() || - cluster.ShouldRecoveryCreateApplicationDatabase() || - cluster.ShouldPgBaseBackupCreateApplicationDatabase() -} - -// ShouldInitDBRunPostInitApplicationSQLRefs returns true if for this cluster, -// during the bootstrap phase using initDB, we need to run post init SQL files -// for the application database from provided references. -func (cluster *Cluster) ShouldInitDBRunPostInitApplicationSQLRefs() bool { - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.InitDB == nil { - return false - } - - return cluster.Spec.Bootstrap.InitDB.PostInitApplicationSQLRefs.HasElements() -} - -// ShouldInitDBRunPostInitTemplateSQLRefs returns true if for this cluster, -// during the bootstrap phase using initDB, we need to run post init SQL files -// for the `template1` database from provided references. -func (cluster *Cluster) ShouldInitDBRunPostInitTemplateSQLRefs() bool { - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.InitDB == nil { - return false - } - - return cluster.Spec.Bootstrap.InitDB.PostInitTemplateSQLRefs.HasElements() -} - -// ShouldInitDBRunPostInitSQLRefs returns true if for this cluster, -// during the bootstrap phase using initDB, we need to run post init SQL files -// for the `postgres` database from provided references. -func (cluster *Cluster) ShouldInitDBRunPostInitSQLRefs() bool { - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.InitDB == nil { - return false - } - - return cluster.Spec.Bootstrap.InitDB.PostInitSQLRefs.HasElements() -} - -// ShouldInitDBCreateApplicationDatabase returns true if the application database needs to be created during initdb -// job -func (cluster *Cluster) ShouldInitDBCreateApplicationDatabase() bool { - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.InitDB == nil { - return false - } - - initDBParameters := cluster.Spec.Bootstrap.InitDB - return initDBParameters.Owner != "" && initDBParameters.Database != "" -} - -// ShouldPgBaseBackupCreateApplicationDatabase returns true if the application database needs to be created during the -// pg_basebackup job -func (cluster *Cluster) ShouldPgBaseBackupCreateApplicationDatabase() bool { - // we skip creating the application database if cluster is a replica - if cluster.IsReplica() { - return false - } - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.PgBaseBackup == nil { - return false - } - - pgBaseBackupParameters := cluster.Spec.Bootstrap.PgBaseBackup - return pgBaseBackupParameters.Owner != "" && pgBaseBackupParameters.Database != "" -} - -// ShouldRecoveryCreateApplicationDatabase returns true if the application database needs to be created during the -// recovery job -func (cluster *Cluster) ShouldRecoveryCreateApplicationDatabase() bool { - // we skip creating the application database if cluster is a replica - if cluster.IsReplica() { - return false - } - - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.Recovery == nil { - return false - } - - recoveryParameters := cluster.Spec.Bootstrap.Recovery - return recoveryParameters.Owner != "" && recoveryParameters.Database != "" -} - -// ShouldCreateProjectedVolume returns whether we should create the projected all in one 
volume -func (cluster *Cluster) ShouldCreateProjectedVolume() bool { - return cluster.Spec.ProjectedVolumeTemplate != nil -} - -// ShouldCreateWalArchiveVolume returns whether we should create the wal archive volume -func (cluster *Cluster) ShouldCreateWalArchiveVolume() bool { - return cluster.Spec.WalStorage != nil -} - -// ShouldPromoteFromReplicaCluster returns true if the cluster should promote -func (cluster *Cluster) ShouldPromoteFromReplicaCluster() bool { - // If there's no replica cluster configuration there's no - // promotion token too, so we don't need to promote. - if cluster.Spec.ReplicaCluster == nil { - return false - } - - // If we don't have a promotion token, we don't need to promote - if len(cluster.Spec.ReplicaCluster.PromotionToken) == 0 { - return false - } - - // If the current token was already used, there's no need to - // promote - if cluster.Spec.ReplicaCluster.PromotionToken == cluster.Status.LastPromotionToken { - return false - } - return true -} - -// ContainsTablespaces returns true if for this cluster, we need to create tablespaces -func (cluster *Cluster) ContainsTablespaces() bool { - return len(cluster.Spec.Tablespaces) != 0 -} - -// GetPostgresUID returns the UID that is being used for the "postgres" -// user -func (cluster Cluster) GetPostgresUID() int64 { - if cluster.Spec.PostgresUID == 0 { - return defaultPostgresUID - } - return cluster.Spec.PostgresUID -} - -// GetPostgresGID returns the GID that is being used for the "postgres" -// user -func (cluster Cluster) GetPostgresGID() int64 { - if cluster.Spec.PostgresGID == 0 { - return defaultPostgresGID - } - return cluster.Spec.PostgresGID -} - -// ExternalCluster gets the external server with a known name, returning -// true if the server was found and false otherwise -func (cluster Cluster) ExternalCluster(name string) (ExternalCluster, bool) { - for _, server := range cluster.Spec.ExternalClusters { - if server.Name == name { - return server, true - } - } - - return ExternalCluster{}, false -} - -// IsReplica checks if this is a replica cluster or not -func (cluster Cluster) IsReplica() bool { - // Before introducing the "primary" field, the - // "enabled" parameter was declared as a "boolean" - // and was not declared "omitempty". - // - // Legacy replica clusters will have the "replica" stanza - // and the "enabled" field set explicitly to true. - // - // The following code is designed to not change the - // previous semantics. - r := cluster.Spec.ReplicaCluster - if r == nil { - return false - } - - if r.Enabled != nil { - return *r.Enabled - } - - clusterName := r.Self - if len(clusterName) == 0 { - clusterName = cluster.Name - } - - return clusterName != r.Primary -} - -var slotNameNegativeRegex = regexp.MustCompile("[^a-z0-9_]+") - -// GetSlotNameFromInstanceName returns the slot name, given the instance name. 
-// It returns an empty string if High Availability Replication Slots are disabled -func (cluster Cluster) GetSlotNameFromInstanceName(instanceName string) string { - if cluster.Spec.ReplicationSlots == nil || - cluster.Spec.ReplicationSlots.HighAvailability == nil || - !cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() { - return "" - } - - return cluster.Spec.ReplicationSlots.HighAvailability.GetSlotNameFromInstanceName(instanceName) -} - -// GetBarmanEndpointCAForReplicaCluster checks if this is a replica cluster which needs barman endpoint CA -func (cluster Cluster) GetBarmanEndpointCAForReplicaCluster() *SecretKeySelector { - if !cluster.IsReplica() { - return nil - } - sourceName := cluster.Spec.ReplicaCluster.Source - externalCluster, found := cluster.ExternalCluster(sourceName) - if !found || externalCluster.BarmanObjectStore == nil { - return nil - } - return externalCluster.BarmanObjectStore.EndpointCA -} - -// GetClusterAltDNSNames returns all the names needed to build a valid Server Certificate -func (cluster *Cluster) GetClusterAltDNSNames() []string { - buildServiceNames := func(serviceName string, enabled bool) []string { - if !enabled { - return nil - } - return []string{ - serviceName, - fmt.Sprintf("%v.%v", serviceName, cluster.Namespace), - fmt.Sprintf("%v.%v.svc", serviceName, cluster.Namespace), - fmt.Sprintf("%v.%v.svc.cluster.local", serviceName, cluster.Namespace), - } - } - altDNSNames := slices.Concat( - buildServiceNames(cluster.GetServiceReadWriteName(), cluster.IsReadWriteServiceEnabled()), - buildServiceNames(cluster.GetServiceReadName(), cluster.IsReadServiceEnabled()), - buildServiceNames(cluster.GetServiceReadOnlyName(), cluster.IsReadOnlyServiceEnabled()), - ) - - if cluster.Spec.Managed != nil && cluster.Spec.Managed.Services != nil { - for _, service := range cluster.Spec.Managed.Services.Additional { - altDNSNames = append(altDNSNames, buildServiceNames(service.ServiceTemplate.ObjectMeta.Name, true)...) - } - } - - return append(altDNSNames, cluster.Spec.Certificates.getServerAltDNSNames()...) -} - -// UsesSecret checks whether a given secret is used by a Cluster. -// -// This function is also used to discover the set of clusters that -// should be reconciled when a certain secret changes. 
-func (cluster *Cluster) UsesSecret(secret string) bool { - if _, ok := cluster.Status.SecretsResourceVersion.Metrics[secret]; ok { - return true - } - certificates := cluster.Status.Certificates - switch secret { - case cluster.GetSuperuserSecretName(), - cluster.GetApplicationSecretName(), - certificates.ClientCASecret, - certificates.ReplicationTLSSecret, - certificates.ServerCASecret, - certificates.ServerTLSSecret: - return true - } - - if cluster.UsesSecretInManagedRoles(secret) { - return true - } - - if cluster.Spec.Backup.IsBarmanEndpointCASet() && cluster.Spec.Backup.BarmanObjectStore.EndpointCA.Name == secret { - return true - } - - if endpointCA := cluster.GetBarmanEndpointCAForReplicaCluster(); endpointCA != nil && endpointCA.Name == secret { - return true - } - - if cluster.Status.PoolerIntegrations != nil { - for _, pgBouncerSecretName := range cluster.Status.PoolerIntegrations.PgBouncerIntegration.Secrets { - if pgBouncerSecretName == secret { - return true - } - } - } - - // watch the secrets defined in external clusters - return cluster.GetExternalClusterSecrets().Has(secret) -} - -// UsesConfigMap checks whether a given secret is used by a Cluster -func (cluster *Cluster) UsesConfigMap(config string) (ok bool) { - if _, ok := cluster.Status.ConfigMapResourceVersion.Metrics[config]; ok { - return true - } - return false -} - -// IsPodMonitorEnabled checks if the PodMonitor object needs to be created -func (cluster *Cluster) IsPodMonitorEnabled() bool { - if cluster.Spec.Monitoring != nil { - return cluster.Spec.Monitoring.EnablePodMonitor - } - - return false -} - -// IsMetricsTLSEnabled checks if the metrics endpoint should use TLS -func (cluster *Cluster) IsMetricsTLSEnabled() bool { - if cluster.Spec.Monitoring != nil && cluster.Spec.Monitoring.TLSConfig != nil { - return cluster.Spec.Monitoring.TLSConfig.Enabled - } - - return false -} - -// GetEnableSuperuserAccess returns if the superuser access is enabled or not -func (cluster *Cluster) GetEnableSuperuserAccess() bool { - if cluster.Spec.EnableSuperuserAccess != nil { - return *cluster.Spec.EnableSuperuserAccess - } - - return false -} - -// LogTimestampsWithMessage prints useful information about timestamps in stdout -func (cluster *Cluster) LogTimestampsWithMessage(ctx context.Context, logMessage string) { - contextLogger := log.FromContext(ctx) - - currentTimestamp := utils.GetCurrentTimestamp() - keysAndValues := []interface{}{ - "phase", cluster.Status.Phase, - "currentTimestamp", currentTimestamp, - "targetPrimaryTimestamp", cluster.Status.TargetPrimaryTimestamp, - "currentPrimaryTimestamp", cluster.Status.CurrentPrimaryTimestamp, - } - - var errs []string - - // Elapsed time since the last request of promotion (TargetPrimaryTimestamp) - if diff, err := utils.DifferenceBetweenTimestamps( - currentTimestamp, - cluster.Status.TargetPrimaryTimestamp, - ); err == nil { - keysAndValues = append( - keysAndValues, - "msPassedSinceTargetPrimaryTimestamp", - diff.Milliseconds(), - ) - } else { - errs = append(errs, err.Error()) - } - - // Elapsed time since the last promotion (CurrentPrimaryTimestamp) - if currentPrimaryDifference, err := utils.DifferenceBetweenTimestamps( - currentTimestamp, - cluster.Status.CurrentPrimaryTimestamp, - ); err == nil { - keysAndValues = append( - keysAndValues, - "msPassedSinceCurrentPrimaryTimestamp", - currentPrimaryDifference.Milliseconds(), - ) - } else { - errs = append(errs, err.Error()) - } - - // Difference between the last promotion and the last request of promotion - // When 
positive, it is the amount of time required in the last promotion - // of a standby to a primary. If negative, it means we have a failover/switchover - // in progress, and the value represents the last measured uptime of the primary. - if currentPrimaryTargetDifference, err := utils.DifferenceBetweenTimestamps( - cluster.Status.CurrentPrimaryTimestamp, - cluster.Status.TargetPrimaryTimestamp, - ); err == nil { - keysAndValues = append( - keysAndValues, - "msDifferenceBetweenCurrentAndTargetPrimary", - currentPrimaryTargetDifference.Milliseconds(), - ) - } else { - errs = append(errs, err.Error()) - } - - if len(errs) > 0 { - keysAndValues = append(keysAndValues, "timestampParsingErrors", errs) - } - - contextLogger.Info(logMessage, keysAndValues...) -} - -// SetInheritedDataAndOwnership sets the cluster as owner of the passed object and then -// sets all the needed annotations and labels -func (cluster *Cluster) SetInheritedDataAndOwnership(obj *metav1.ObjectMeta) { - cluster.SetInheritedData(obj) - utils.SetAsOwnedBy(obj, cluster.ObjectMeta, cluster.TypeMeta) -} - -// SetInheritedData sets all the needed annotations and labels -func (cluster *Cluster) SetInheritedData(obj *metav1.ObjectMeta) { - utils.InheritAnnotations(obj, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) - utils.InheritLabels(obj, cluster.Labels, cluster.GetFixedInheritedLabels(), configuration.Current) - utils.LabelClusterName(obj, cluster.GetName()) - utils.SetOperatorVersion(obj, versions.Version) -} - -// ShouldForceLegacyBackup if present takes a backup without passing the name argument even on barman version 3.3.0+. -// This is needed to test both backup system in the E2E suite -func (cluster *Cluster) ShouldForceLegacyBackup() bool { - return cluster.Annotations[utils.LegacyBackupAnnotationName] == "true" -} - -// GetSeccompProfile return the proper SeccompProfile set in the cluster for Pods and Containers -func (cluster *Cluster) GetSeccompProfile() *corev1.SeccompProfile { - if cluster.Spec.SeccompProfile != nil { - return cluster.Spec.SeccompProfile - } - - return &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - } -} - -// GetCoredumpFilter get the coredump filter value from the cluster annotation -func (cluster *Cluster) GetCoredumpFilter() string { - value, ok := cluster.Annotations[utils.CoredumpFilter] - if ok { - return value - } - return system.DefaultCoredumpFilter -} - -// IsInplaceRestartPhase returns true if the cluster is in a phase that handles the Inplace restart -func (cluster *Cluster) IsInplaceRestartPhase() bool { - return cluster.Status.Phase == PhaseInplacePrimaryRestart || - cluster.Status.Phase == PhaseInplaceDeletePrimaryRestart -} - -// GetTablespaceConfiguration returns the tablespaceConfiguration for the given name -// otherwise return nil -func (cluster *Cluster) GetTablespaceConfiguration(name string) *TablespaceConfiguration { - for _, tbsConfig := range cluster.Spec.Tablespaces { - if name == tbsConfig.Name { - return &tbsConfig - } - } - - return nil -} - -// GetServerCASecretObjectKey returns a types.NamespacedName pointing to the secret -func (cluster *Cluster) GetServerCASecretObjectKey() types.NamespacedName { - return types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.GetServerCASecretName()} -} - -// IsBarmanBackupConfigured returns true if one of the possible backup destination -// is configured, false otherwise -func (backupConfiguration *BackupConfiguration) IsBarmanBackupConfigured() bool { - 
return backupConfiguration != nil && backupConfiguration.BarmanObjectStore != nil && - backupConfiguration.BarmanObjectStore.BarmanCredentials.ArePopulated() -} - -// IsBarmanEndpointCASet returns true if we have a CA bundle for the endpoint -// false otherwise -func (backupConfiguration *BackupConfiguration) IsBarmanEndpointCASet() bool { - return backupConfiguration != nil && - backupConfiguration.BarmanObjectStore != nil && - backupConfiguration.BarmanObjectStore.EndpointCA != nil && - backupConfiguration.BarmanObjectStore.EndpointCA.Name != "" && - backupConfiguration.BarmanObjectStore.EndpointCA.Key != "" -} - -// UpdateBackupTimes sets the firstRecoverabilityPoint and lastSuccessfulBackup -// for the provided method, as well as the overall firstRecoverabilityPoint and -// lastSuccessfulBackup for the cluster -func (cluster *Cluster) UpdateBackupTimes( - backupMethod BackupMethod, - firstRecoverabilityPoint *time.Time, - lastSuccessfulBackup *time.Time, -) { - type comparer func(a metav1.Time, b metav1.Time) bool - // tryGetMaxTime gets either the newest or oldest time from a set of backup times, - // depending on the comparer argument passed to it - tryGetMaxTime := func(m map[BackupMethod]metav1.Time, compare comparer) string { - var maximum metav1.Time - for _, ts := range m { - if maximum.IsZero() || compare(ts, maximum) { - maximum = ts - } - } - result := "" - if !maximum.IsZero() { - result = maximum.Format(time.RFC3339) - } - - return result - } - - setTime := func(backupTimes map[BackupMethod]metav1.Time, value *time.Time) map[BackupMethod]metav1.Time { - if value == nil { - delete(backupTimes, backupMethod) - return backupTimes - } - - if backupTimes == nil { - backupTimes = make(map[BackupMethod]metav1.Time) - } - - backupTimes[backupMethod] = metav1.NewTime(*value) - return backupTimes - } - - cluster.Status.FirstRecoverabilityPointByMethod = setTime(cluster.Status.FirstRecoverabilityPointByMethod, - firstRecoverabilityPoint) - cluster.Status.FirstRecoverabilityPoint = tryGetMaxTime( - cluster.Status.FirstRecoverabilityPointByMethod, - // we pass a comparer to get the first among the recoverability points - func(a metav1.Time, b metav1.Time) bool { - return a.Before(&b) - }) - - cluster.Status.LastSuccessfulBackupByMethod = setTime(cluster.Status.LastSuccessfulBackupByMethod, - lastSuccessfulBackup) - cluster.Status.LastSuccessfulBackup = tryGetMaxTime( - cluster.Status.LastSuccessfulBackupByMethod, - // we pass a comparer to get the last among the last backup times per method - func(a metav1.Time, b metav1.Time) bool { - return b.Before(&a) - }) -} - -// IsReadServiceEnabled checks if the read service is enabled for the cluster. -// It returns false if the read service is listed in the DisabledDefaultServices slice. -func (cluster *Cluster) IsReadServiceEnabled() bool { - if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { - return true - } - - return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeR) -} - -// IsReadWriteServiceEnabled checks if the read-write service is enabled for the cluster. -// It returns false if the read-write service is listed in the DisabledDefaultServices slice. 
-func (cluster *Cluster) IsReadWriteServiceEnabled() bool { - if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { - return true - } - return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRW) -} - -// IsReadOnlyServiceEnabled checks if the read-only service is enabled for the cluster. -// It returns false if the read-only service is listed in the DisabledDefaultServices slice. -func (cluster *Cluster) IsReadOnlyServiceEnabled() bool { - if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { - return true - } - - return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRO) -} - -// BuildPostgresOptions create the list of options that -// should be added to the PostgreSQL configuration to -// recover given a certain target -func (target *RecoveryTarget) BuildPostgresOptions() string { - result := "" - - if target == nil { - return result - } - - if target.TargetTLI != "" { - result += fmt.Sprintf( - "recovery_target_timeline = '%v'\n", - target.TargetTLI) - } - if target.TargetXID != "" { - result += fmt.Sprintf( - "recovery_target_xid = '%v'\n", - target.TargetXID) - } - if target.TargetName != "" { - result += fmt.Sprintf( - "recovery_target_name = '%v'\n", - target.TargetName) - } - if target.TargetLSN != "" { - result += fmt.Sprintf( - "recovery_target_lsn = '%v'\n", - target.TargetLSN) - } - if target.TargetTime != "" { - result += fmt.Sprintf( - "recovery_target_time = '%v'\n", - utils.ConvertToPostgresFormat(target.TargetTime)) - } - if target.TargetImmediate != nil && *target.TargetImmediate { - result += "recovery_target = immediate\n" - } - if target.Exclusive != nil && *target.Exclusive { - result += "recovery_target_inclusive = false\n" - } else { - result += "recovery_target_inclusive = true\n" - } - - return result -} - func init() { SchemeBuilder.Register(&Cluster{}, &ClusterList{}) } diff --git a/api/v1/cluster_webhook_test.go b/api/v1/cluster_webhook_test.go deleted file mode 100644 index cb9b456696..0000000000 --- a/api/v1/cluster_webhook_test.go +++ /dev/null @@ -1,4878 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strings" - "time" - - storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/ptr" - - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("bootstrap methods validation", func() { - It("doesn't complain if there isn't a configuration", func() { - emptyCluster := &Cluster{} - result := emptyCluster.validateBootstrapMethod() - Expect(result).To(BeEmpty()) - }) - - It("doesn't complain if we are using initdb", func() { - initdbCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, - }, - }, - } - result := initdbCluster.validateBootstrapMethod() - Expect(result).To(BeEmpty()) - }) - - It("doesn't complain if we are using recovery", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{}, - }, - }, - } - result := recoveryCluster.validateBootstrapMethod() - Expect(result).To(BeEmpty()) - }) - - It("complains where there are two active bootstrap methods", func() { - invalidCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{}, - InitDB: &BootstrapInitDB{}, - }, - }, - } - result := invalidCluster.validateBootstrapMethod() - Expect(result).To(HaveLen(1)) - }) -}) - -var _ = Describe("certificates options validation", func() { - It("doesn't complain if there isn't a configuration", func() { - emptyCluster := &Cluster{} - result := emptyCluster.validateCerts() - Expect(result).To(BeEmpty()) - }) - It("doesn't complain if you specify some valid secret names", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Certificates: &CertificatesConfiguration{ - ServerCASecret: "test-server-ca", - ServerTLSSecret: "test-server-tls", - }, - }, - } - result := cluster.validateCerts() - Expect(result).To(BeEmpty()) - }) - It("does complain if you specify the TLS secret and not the CA", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Certificates: &CertificatesConfiguration{ - ServerTLSSecret: "test-server-tls", - }, - }, - } - result := cluster.validateCerts() - Expect(result).To(HaveLen(1)) - }) - It("does complain if you specify the TLS secret and AltDNSNames is not empty", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Certificates: &CertificatesConfiguration{ - ServerCASecret: "test-server-ca", - ServerTLSSecret: "test-server-tls", - ServerAltDNSNames: []string{"dns-name"}, - }, - }, - } - result := cluster.validateCerts() - Expect(result).To(HaveLen(1)) - }) -}) - -var _ = Describe("initdb options validation", func() { - It("doesn't complain if there isn't a configuration", func() { - emptyCluster := &Cluster{} - result := emptyCluster.validateInitDB() - Expect(result).To(BeEmpty()) - }) - - It("complains if you specify the database name but not the owner", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "app", - }, - }, - }, - } - - result := cluster.validateInitDB() - Expect(result).To(HaveLen(1)) - }) - - It("complains if you specify the owner but not the database name", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Owner: "app", - }, - }, - }, - } - - result := cluster.validateInitDB() - Expect(result).To(HaveLen(1)) - }) - - It("doesn't complain if you specify both database name and owner user", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "app", - Owner: "app", - }, - }, - }, - } - - result := cluster.validateInitDB() - Expect(result).To(BeEmpty()) - 
}) - - It("complain if key is missing in the secretRefs", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "app", - Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - SecretRefs: []SecretKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: "secret1"}, - }, - }, - }, - }, - }, - }, - } - - result := cluster.validateInitDB() - Expect(result).To(HaveLen(1)) - }) - - It("complain if name is missing in the secretRefs", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "app", - Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - SecretRefs: []SecretKeySelector{ - { - Key: "key", - }, - }, - }, - }, - }, - }, - } - - result := cluster.validateInitDB() - Expect(result).To(HaveLen(1)) - }) - - It("complain if key is missing in the configMapRefs", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "app", - Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - ConfigMapRefs: []ConfigMapKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: "configmap1"}, - }, - }, - }, - }, - }, - }, - } - - result := cluster.validateInitDB() - Expect(result).To(HaveLen(1)) - }) - - It("complain if name is missing in the configMapRefs", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "app", - Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - ConfigMapRefs: []ConfigMapKeySelector{ - { - Key: "key", - }, - }, - }, - }, - }, - }, - } - - result := cluster.validateInitDB() - Expect(result).To(HaveLen(1)) - }) - - It("doesn't complain if configmapRefs and secretRefs are valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "app", - Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - ConfigMapRefs: []ConfigMapKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: "configmap1"}, - Key: "key", - }, - { - LocalObjectReference: LocalObjectReference{Name: "configmap2"}, - Key: "key", - }, - }, - SecretRefs: []SecretKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: "secret1"}, - Key: "key", - }, - { - LocalObjectReference: LocalObjectReference{Name: "secret2"}, - Key: "key", - }, - }, - }, - }, - }, - }, - } - - result := cluster.validateInitDB() - Expect(result).To(BeEmpty()) - }) - - It("doesn't complain if superuser secret it's empty", func() { - cluster := Cluster{ - Spec: ClusterSpec{}, - } - - result := cluster.validateSuperuserSecret() - - Expect(result).To(BeEmpty()) - }) - - It("complains if superuser secret name it's empty", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - SuperuserSecret: &LocalObjectReference{ - Name: "", - }, - }, - } - - result := cluster.validateSuperuserSecret() - Expect(result).To(HaveLen(1)) - }) -}) - -var _ = Describe("cluster configuration", func() { - It("defaults to creating an application database", func() { - cluster := Cluster{} - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app")) - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app")) - }) - - It("defaults the owner user with the database name", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "appdb", - }, - }, - }, - } - - 
cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("appdb")) - }) - - It("defaults to creating an application database if recovery is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{}, - }, - }, - } - cluster.Default() - Expect(cluster.ShouldRecoveryCreateApplicationDatabase()).Should(BeTrue()) - Expect(cluster.Spec.Bootstrap.Recovery.Database).ShouldNot(BeEmpty()) - Expect(cluster.Spec.Bootstrap.Recovery.Owner).ShouldNot(BeEmpty()) - Expect(cluster.Spec.Bootstrap.Recovery.Secret).Should(BeNil()) - }) - - It("defaults the owner user with the database name for recovery", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - Database: "appdb", - }, - }, - }, - } - - cluster.Default() - Expect(cluster.Spec.Bootstrap.Recovery.Owner).To(Equal("appdb")) - }) - - It("defaults to creating an application database if pg_basebackup is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{}, - }, - }, - } - cluster.Default() - Expect(cluster.ShouldPgBaseBackupCreateApplicationDatabase()).Should(BeTrue()) - Expect(cluster.Spec.Bootstrap.PgBaseBackup.Database).ShouldNot(BeEmpty()) - Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).ShouldNot(BeEmpty()) - Expect(cluster.Spec.Bootstrap.PgBaseBackup.Secret).Should(BeNil()) - }) - - It("defaults the owner user with the database name for pg_basebackup", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{ - Database: "appdb", - }, - }, - }, - } - - cluster.Default() - Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).To(Equal("appdb")) - }) - - It("defaults the PostgreSQL configuration with parameters from the operator", func() { - cluster := Cluster{} - cluster.Default() - Expect(cluster.Spec.PostgresConfiguration.Parameters).ToNot(BeEmpty()) - }) - - It("defaults the anti-affinity", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{}, - }, - } - cluster.Default() - Expect(cluster.Spec.Affinity.PodAntiAffinityType).To(BeEquivalentTo(PodAntiAffinityTypePreferred)) - Expect(cluster.Spec.Affinity.EnablePodAntiAffinity).To(BeNil()) - }) -}) - -var _ = Describe("ImagePullPolicy validation", func() { - It("complains if the imagePullPolicy isn't valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImagePullPolicy: "wrong", - }, - } - - result := cluster.validateImagePullPolicy() - Expect(result).To(HaveLen(1)) - }) - It("does not complain if the imagePullPolicy is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImagePullPolicy: "Always", - }, - } - - result := cluster.validateImagePullPolicy() - Expect(result).To(BeEmpty()) - }) -}) - -var _ = Describe("Defaulting webhook", func() { - It("should fill the image name if it isn't already set", func() { - cluster := Cluster{} - cluster.Default() - Expect(cluster.Spec.ImageName).To(Equal(configuration.Current.PostgresImageName)) - }) - - It("shouldn't set the image name if already present", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "test:13", - }, - } - cluster.Default() - Expect(cluster.Spec.ImageName).To(Equal("test:13")) - }) - - It("should setup the application database name", func() { - cluster := Cluster{} - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app")) -
Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app")) - }) - - It("should set the owner name as the database name", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "test", - }, - }, - }, - } - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("test")) - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("test")) - }) - - It("should not overwrite application database and owner settings", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "testdb", - Owner: "testuser", - }, - }, - }, - } - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("testdb")) - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("testuser")) - }) -}) - -var _ = Describe("Image name validation", func() { - It("doesn't complain if the user simply accepts the default", func() { - var cluster Cluster - Expect(cluster.validateImageName()).To(BeEmpty()) - - // Let's apply the defaulting webhook, too - cluster.Default() - Expect(cluster.validateImageName()).To(BeEmpty()) - }) - - It("complains when the 'latest' tag is detected", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:latest", - }, - } - Expect(cluster.validateImageName()).To(HaveLen(1)) - }) - - It("doesn't complain when an alpha tag is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:15alpha1", - }, - } - Expect(cluster.validateImageName()).To(BeEmpty()) - }) - - It("doesn't complain when a beta tag is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:15beta1", - }, - } - Expect(cluster.validateImageName()).To(BeEmpty()) - }) - - It("doesn't complain when a release candidate tag is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:15rc1", - }, - } - Expect(cluster.validateImageName()).To(BeEmpty()) - }) - - It("complains when only the sha is passed", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866", - }, - } - Expect(cluster.validateImageName()).To(HaveLen(1)) - }) - - It("doesn't complain if the tag is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:10.4", - }, - } - Expect(cluster.validateImageName()).To(BeEmpty()) - }) - - It("doesn't complain if the tag is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:14.4-1", - }, - } - Expect(cluster.validateImageName()).To(BeEmpty()) - }) - - It("doesn't complain if the tag is valid and has sha", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:10.4@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866", - }, - } - Expect(cluster.validateImageName()).To(BeEmpty()) - }) - - It("complains when the tag name is not a PostgreSQL version", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:test_12", - }, - } - Expect(cluster.validateImageName()).To(HaveLen(1)) - }) -}) - -var _ = DescribeTable("parsePostgresQuantityValue", - func(value string, parsedValue resource.Quantity, expectError bool) { - quantity, err := parsePostgresQuantityValue(value) - if !expectError { - Expect(quantity, err).Should(BeComparableTo(parsedValue)) - } else { - Expect(err).Should(HaveOccurred()) - } - }, - Entry("bare", "1", resource.MustParse("1Mi"), false), - Entry("B", "1B",
resource.MustParse("1"), false), - Entry("kB", "1kB", resource.MustParse("1Ki"), false), - Entry("MB", "1MB", resource.MustParse("1Mi"), false), - Entry("GB", "1GB", resource.MustParse("1Gi"), false), - Entry("TB", "1TB", resource.MustParse("1Ti"), false), - Entry("spaceB", "1 B", resource.MustParse("1"), false), - Entry("spaceMB", "1 MB", resource.MustParse("1Mi"), false), - Entry("reject kb", "1kb", resource.Quantity{}, true), - Entry("reject Mb", "1Mb", resource.Quantity{}, true), - Entry("reject G", "1G", resource.Quantity{}, true), - Entry("reject random unit", "1random", resource.Quantity{}, true), - Entry("reject non-numeric", "non-numeric", resource.Quantity{}, true), -) - -var _ = Describe("configuration change validation", func() { - It("doesn't complain when the configuration is exactly the same", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:10.4", - }, - } - clusterNew := clusterOld - Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(BeEmpty()) - }) - - It("doesn't complain when we change a setting which is not fixed", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:10.4", - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:10.4", - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "shared_buffers": "4G", - }, - }, - }, - } - Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(BeEmpty()) - }) - - It("complains when changing postgres major version and settings", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:10.4", - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:10.5", - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "shared_buffers": "4G", - }, - }, - }, - } - Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(HaveLen(1)) - }) - - It("produces no error when WAL size settings are correct", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "80MB", - "max_wal_size": "1024", - }, - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "1500", - "max_wal_size": "2 GB", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "3Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "1.5GB", - "max_wal_size": "2000", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "2Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "max_wal_size": "1GB", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "2Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: 
map[string]string{ - "min_wal_size": "100MB", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "2Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{}, - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) - }) - - It("produces one complaint when min_wal_size is bigger than max_wal_size", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "1500", - "max_wal_size": "1GB", - }, - }, - StorageConfiguration: StorageConfiguration{ - Size: "2Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "2G", - "max_wal_size": "1GB", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "2Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "4Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) - }) - - It("produces one complaint when max_wal_size is bigger than WAL storage", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "max_wal_size": "2GB", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "1G", - }, - StorageConfiguration: StorageConfiguration{ - Size: "4Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "80MB", - "max_wal_size": "1500", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "1G", - }, - StorageConfiguration: StorageConfiguration{ - Size: "4Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) - }) - - It("produces two complaints when min_wal_size is bigger than WAL storage and max_wal_size", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "3GB", - "max_wal_size": "1GB", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "2Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(2)) - }) - - It("complains about invalid value for min_wal_size and max_wal_size", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "xxx", - "max_wal_size": "1GB", - }, - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "80", - "max_wal_size": "1Gb", - }, - }, - WalStorage: &StorageConfiguration{ - Size: "2Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) - }) - - It("doesn't compare default values for min_wal_size and max_wal_size with WalStorage", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - 
Parameters: map[string]string{}, - }, - WalStorage: &StorageConfiguration{ - Size: "100Mi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "min_wal_size": "1.5GB", // default for max_wal_size is 1GB - }, - }, - WalStorage: &StorageConfiguration{ - Size: "2Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "10Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) - - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "max_wal_size": "70M", // default for min_wal_size is 80M - }, - }, - WalStorage: &StorageConfiguration{ - Size: "2Gi", - }, - StorageConfiguration: StorageConfiguration{ - Size: "4Gi", - }, - }, - } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) - }) - - It("should detect an invalid `shared_buffers` value", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "shared_buffers": "invalid", - }, - }, - }, - } - - Expect(cluster.validateConfiguration()).To(HaveLen(1)) - }) - - It("should reject minimal wal_level when backup is configured", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{ - BarmanObjectStore: &BarmanObjectStoreConfiguration{ - BarmanCredentials: BarmanCredentials{ - AWS: &S3Credentials{}, - }, - }, - }, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "minimal", - "max_wal_senders": "0", - }, - }, - }, - } - Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) - Expect(cluster.validateConfiguration()).To(HaveLen(1)) - }) - - It("should allow replica wal_level when backup is configured", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{ - BarmanObjectStore: &BarmanObjectStoreConfiguration{ - BarmanCredentials: BarmanCredentials{ - AWS: &S3Credentials{}, - }, - }, - }, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "replica", - }, - }, - }, - } - Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) - Expect(cluster.validateConfiguration()).To(BeEmpty()) - }) - - It("should allow logical wal_level when backup is configured", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{ - BarmanObjectStore: &BarmanObjectStoreConfiguration{ - BarmanCredentials: BarmanCredentials{ - AWS: &S3Credentials{}, - }, - }, - }, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "logical", - }, - }, - }, - } - Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) - Expect(cluster.validateConfiguration()).To(BeEmpty()) - }) - - It("should reject minimal wal_level when instances is greater than one", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 2, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "minimal", - "max_wal_senders": "0", - }, - }, - }, - } - - Expect(cluster.validateConfiguration()).To(HaveLen(1)) - }) - - It("should allow replica wal_level when instances is greater than one", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 2, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "replica", - }, - }, - }, - } - 
Expect(cluster.validateConfiguration()).To(BeEmpty()) - }) - - It("should allow logical wal_level when instances is greater than one", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 2, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "logical", - }, - }, - }, - } - Expect(cluster.validateConfiguration()).To(BeEmpty()) - }) - - It("should reject an unknown wal_level value", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "test", - }, - }, - }, - } - - errs := cluster.validateConfiguration() - Expect(errs).To(HaveLen(1)) - Expect(errs[0].Detail).To(ContainSubstring("unrecognized `wal_level` value - allowed values")) - }) - - It("should reject minimal if it is a replica cluster", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 1, - ReplicaCluster: &ReplicaClusterConfiguration{ - Enabled: ptr.To(true), - }, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "minimal", - "max_wal_senders": "0", - }, - }, - }, - } - Expect(cluster.IsReplica()).To(BeTrue()) - Expect(cluster.validateConfiguration()).To(HaveLen(1)) - }) - - It("should allow minimal wal_level with one instance and without archive mode", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "minimal", - "max_wal_senders": "0", - }, - }, - }, - } - Expect(cluster.validateConfiguration()).To(BeEmpty()) - }) - - It("should disallow minimal wal_level with one instance, without max_wal_senders being specified", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "minimal", - }, - }, - }, - } - Expect(cluster.validateConfiguration()).To(HaveLen(1)) - }) - - It("should disallow changing wal_level to minimal for existing clusters", func() { - oldCluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "max_wal_senders": "0", - }, - }, - }, - } - oldCluster.setDefaults(true) - - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "minimal", - "max_wal_senders": "0", - }, - }, - }, - } - Expect(cluster.validateWALLevelChange(&oldCluster)).To(HaveLen(1)) - }) - - It("should allow retaining wal_level to minimal for existing clusters", func() { - oldCluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "minimal", - "max_wal_senders": "0", - }, - }, - }, - } - oldCluster.setDefaults(true) - - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - 
Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_level": "minimal", - "max_wal_senders": "0", - "shared_buffers": "512MB", - }, - }, - }, - } - Expect(cluster.validateWALLevelChange(&oldCluster)).To(BeEmpty()) - }) - - Describe("wal_log_hints", func() { - It("should reject wal_log_hints set to an invalid value", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_log_hints": "foo", - }, - }, - }, - } - Expect(cluster.validateConfiguration()).To(HaveLen(1)) - }) - - It("should allow wal_log_hints set to off for clusters having just one instance", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_log_hints": "off", - }, - }, - }, - } - Expect(cluster.validateConfiguration()).To(BeEmpty()) - }) - - It("should not allow wal_log_hints set to off for clusters having more than one instance", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 3, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_log_hints": "off", - }, - }, - }, - } - Expect(cluster.validateConfiguration()).ToNot(BeEmpty()) - }) - - It("should allow wal_log_hints set to on for clusters having just one instance", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 1, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_log_hints": "on", - }, - }, - }, - } - Expect(cluster.validateConfiguration()).To(BeEmpty()) - }) - - It("should allow wal_log_hints set to true for clusters having more than one instance", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - utils.SkipWalArchiving: "enabled", - }, - }, - Spec: ClusterSpec{ - Instances: 3, - PostgresConfiguration: PostgresConfiguration{ - Parameters: map[string]string{ - "wal_log_hints": "true", - }, - }, - }, - } - Expect(cluster.validateConfiguration()).To(BeEmpty()) - }) - }) -}) - -var _ = Describe("validate image name change", func() { - Context("using image name", func() { - It("doesn't complain with no changes", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{}, - } - clusterNew := Cluster{ - Spec: ClusterSpec{}, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) - }) - - It("complains if it can't upgrade between major versions", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:12.0", - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:11.0", - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) - }) - - It("doesn't complain if image change is valid", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:12.1", - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:12.0", - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) - }) - }) - Context("using image
catalog", func() { - It("complains on major upgrades", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 15, - }, - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 16, - }, - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) - }) - }) - Context("changing from imageName to imageCatalogRef", func() { - It("doesn't complain when the major is the same", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:16.1", - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 16, - }, - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) - }) - It("complains on major upgrades", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:15.1", - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 16, - }, - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) - }) - It("complains going from default imageName to different major imageCatalogRef", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{}, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 14, - }, - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) - }) - It("doesn't complain going from default imageName to same major imageCatalogRef", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{}, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 16, - }, - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) - }) - }) - - Context("changing from imageCatalogRef to imageName", func() { - It("doesn't complain when the major is the same", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 16, - }, - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:16.1", - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) - }) - It("complains on major upgrades", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 15, - }, - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageName: "postgres:16.1", - }, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) - }) - It("complains going from default imageName to different major imageCatalogRef", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: 
"test", - Kind: "ImageCatalog", - }, - Major: 14, - }, - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{}, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) - }) - It("doesn't complain going from default imageName to same major imageCatalogRef", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ - TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - Name: "test", - Kind: "ImageCatalog", - }, - Major: 16, - }, - }, - } - clusterNew := Cluster{ - Spec: ClusterSpec{}, - } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) - }) - }) -}) - -var _ = Describe("recovery target", func() { - It("is mutually exclusive", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "", - TargetXID: "", - TargetName: "", - TargetLSN: "1/1", - TargetTime: "2021-09-01 10:22:47.000000+06", - TargetImmediate: nil, - Exclusive: nil, - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) - }) - - It("Requires BackupID to perform PITR with TargetName", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - BackupID: "20220616T031500", - TargetTLI: "", - TargetXID: "", - TargetName: "restore_point_1", - TargetLSN: "", - TargetTime: "", - TargetImmediate: nil, - Exclusive: nil, - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) - }) - - It("Fails when no BackupID is provided to perform PITR with TargetXID", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - BackupID: "", - TargetTLI: "", - TargetXID: "1/1", - TargetName: "", - TargetLSN: "", - TargetTime: "", - TargetImmediate: nil, - Exclusive: nil, - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) - }) - - It("TargetTime's format as `YYYY-MM-DD HH24:MI:SS.FF6TZH` is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "", - TargetXID: "", - TargetName: "", - TargetLSN: "", - TargetTime: "2021-09-01 10:22:47.000000+06", - TargetImmediate: nil, - Exclusive: nil, - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) - }) - - It("TargetTime's format as YYYY-MM-DD HH24:MI:SS.FF6TZH:TZM` is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "", - TargetXID: "", - TargetName: "", - TargetLSN: "", - TargetTime: "2021-09-01 10:22:47.000000+06:00", - TargetImmediate: nil, - Exclusive: nil, - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) - }) - - It("TargetTime's format as YYYY-MM-DD HH24:MI:SS.FF6 TZH:TZM` is invalid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "", - TargetXID: "", - TargetName: "", - TargetLSN: "", - TargetTime: "2021-09-01 10:22:47.000000 +06:00", - TargetImmediate: nil, - Exclusive: nil, - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) - }) - - It("raises errors for invalid LSN", 
func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "", - TargetXID: "", - TargetName: "", - TargetLSN: "28734982739847293874823974928738423/987429837498273498723984723", - TargetTime: "", - TargetImmediate: nil, - Exclusive: nil, - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) - }) - - It("valid LSN", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "", - TargetXID: "", - TargetName: "", - TargetLSN: "1/1", - TargetTime: "", - TargetImmediate: nil, - Exclusive: nil, - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) - }) - - It("can be specified", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTime: "2020-01-01 01:01:00", - }, - }, - }, - }, - } - - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) - }) - - When("recoveryTLI is specified", func() { - It("allows 'latest'", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "latest", - }, - }, - }, - }, - } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) - }) - - It("allows a positive integer", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "23", - }, - }, - }, - }, - } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) - }) - - It("prevents 0 value", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "0", - }, - }, - }, - }, - } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) - }) - - It("prevents negative values", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "-5", - }, - }, - }, - }, - } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) - }) - - It("prevents everything else besides the empty string", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ - TargetTLI: "I don't remember", - }, - }, - }, - }, - } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) - }) - }) -}) - -var _ = Describe("primary update strategy", func() { - It("allows 'unsupervised'", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PrimaryUpdateStrategy: PrimaryUpdateStrategyUnsupervised, - Instances: 3, - }, - } - Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty()) - }) - - It("allows 'supervised'", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PrimaryUpdateStrategy: PrimaryUpdateStrategySupervised, - Instances: 3, - }, - } - Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty()) - }) - - It("prevents 'supervised' for single-instance clusters", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PrimaryUpdateStrategy: PrimaryUpdateStrategySupervised, - Instances: 1, - }, - } - Expect(cluster.validatePrimaryUpdateStrategy()).ToNot(BeEmpty()) - }) - - It("allows 'unsupervised' for single-instance
clusters", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PrimaryUpdateStrategy: PrimaryUpdateStrategyUnsupervised, - Instances: 1, - }, - } - Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty()) - }) - - It("prevents everything else", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PrimaryUpdateStrategy: "maybe", - Instances: 3, - }, - } - Expect(cluster.validatePrimaryUpdateStrategy()).ToNot(BeEmpty()) - }) -}) - -var _ = Describe("Number of synchronous replicas", func() { - Context("new-style configuration", func() { - It("can't have both new-style configuration and legacy one", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 3, - MinSyncReplicas: 1, - MaxSyncReplicas: 2, - PostgresConfiguration: PostgresConfiguration{ - Synchronous: &SynchronousReplicaConfiguration{ - Number: 2, - }, - }, - }, - } - Expect(cluster.validateConfiguration()).ToNot(BeEmpty()) - }) - }) - - Context("legacy configuration", func() { - It("should be a positive integer", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 3, - MaxSyncReplicas: -3, - }, - } - Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty()) - }) - - It("should not be equal than the number of replicas", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 3, - MaxSyncReplicas: 3, - }, - } - Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty()) - }) - - It("should not be greater than the number of replicas", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 3, - MaxSyncReplicas: 5, - }, - } - Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty()) - }) - - It("can be zero", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 3, - MaxSyncReplicas: 0, - }, - } - Expect(cluster.validateMaxSyncReplicas()).To(BeEmpty()) - }) - - It("can be lower than the number of replicas", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Instances: 3, - MaxSyncReplicas: 2, - }, - } - Expect(cluster.validateMaxSyncReplicas()).To(BeEmpty()) - }) - }) -}) - -var _ = Describe("storage configuration validation", func() { - It("complains if the size is being reduced", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ - Size: "1G", - }, - }, - } - - clusterNew := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ - Size: "512M", - }, - }, - } - - Expect(clusterNew.validateStorageChange(&clusterOld)).ToNot(BeEmpty()) - }) - - It("does not complain if nothing has been changed", func() { - one := "one" - clusterOld := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ - Size: "1G", - StorageClass: &one, - }, - }, - } - - clusterNew := clusterOld.DeepCopy() - - Expect(clusterNew.validateStorageChange(&clusterOld)).To(BeEmpty()) - }) - - It("works fine is the size is being enlarged", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ - Size: "8G", - }, - }, - } - - clusterNew := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ - Size: "10G", - }, - }, - } - - Expect(clusterNew.validateStorageChange(&clusterOld)).To(BeEmpty()) - }) -}) - -var _ = Describe("Cluster name validation", func() { - It("should be a valid DNS label", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test.one", - }, - } - Expect(cluster.validateName()).ToNot(BeEmpty()) - }) - - It("should not be too long", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "abcdefghi" + - 
"abcdefghi" + - "abcdefghi" + - "abcdefghi" + - "abcdefghi" + - "abcdefghi" + - "abcdefghi" + - "abcdefghi" + - "abcdefghi", - }, - } - Expect(cluster.validateName()).ToNot(BeEmpty()) - }) - - It("should not raise errors when the name is ok", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "abcdefghi" + - "abcdefghi" + - "abcdefghi" + - "abcdefghi", - }, - } - Expect(cluster.validateName()).To(BeEmpty()) - }) - - It("should return errors when the name is not DNS-1035 compliant", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "4b96d026-a956-47eb-bae8-a99b840805c3", - }, - } - Expect(cluster.validateName()).NotTo(BeEmpty()) - }) - - It("should return errors when the name length is greater than 50", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: strings.Repeat("toomuchlong", 4) + "-" + "after4times", - }, - } - Expect(cluster.validateName()).NotTo(BeEmpty()) - }) - - It("should return errors when having a name with dots", func() { - cluster := Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "wrong.name", - }, - } - Expect(cluster.validateName()).NotTo(BeEmpty()) - }) -}) - -var _ = Describe("validation of the list of external clusters", func() { - It("is correct when it's empty", func() { - cluster := Cluster{} - Expect(cluster.validateExternalClusters()).To(BeEmpty()) - }) - - It("complains when the list of clusters contains duplicates", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ExternalClusters: []ExternalCluster{ - { - Name: "one", - ConnectionParameters: map[string]string{ - "dbname": "postgres", - }, - }, - { - Name: "one", - ConnectionParameters: map[string]string{ - "dbname": "postgres", - }, - }, - }, - }, - } - Expect(cluster.validateExternalClusters()).ToNot(BeEmpty()) - }) - - It("should not raise errors is the cluster name is unique", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ExternalClusters: []ExternalCluster{ - { - Name: "one", - ConnectionParameters: map[string]string{ - "dbname": "postgres", - }, - }, - { - Name: "two", - ConnectionParameters: map[string]string{ - "dbname": "postgres", - }, - }, - }, - }, - } - Expect(cluster.validateExternalClusters()).To(BeEmpty()) - }) -}) - -var _ = Describe("validation of an external cluster", func() { - It("ensure that one of connectionParameters and barmanObjectStore is set", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ExternalClusters: []ExternalCluster{ - {}, - }, - }, - } - Expect(cluster.validateExternalClusters()).To(Not(BeEmpty())) - - cluster.Spec.ExternalClusters[0].ConnectionParameters = map[string]string{ - "dbname": "postgres", - } - cluster.Spec.ExternalClusters[0].BarmanObjectStore = nil - Expect(cluster.validateExternalClusters()).To(BeEmpty()) - - cluster.Spec.ExternalClusters[0].ConnectionParameters = nil - cluster.Spec.ExternalClusters[0].BarmanObjectStore = &BarmanObjectStoreConfiguration{} - Expect(cluster.validateExternalClusters()).To(BeEmpty()) - }) -}) - -var _ = Describe("bootstrap base backup validation", func() { - It("complains if you specify the database name but not the owner for pg_basebackup", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{ - Database: "app", - }, - }, - }, - } - - result := cluster.validatePgBaseBackupApplicationDatabase() - Expect(result).To(HaveLen(1)) - }) - - It("complains if you specify the owner but not the database name for pg_basebackup", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - 
Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{ - Owner: "app", - }, - }, - }, - } - - result := cluster.validatePgBaseBackupApplicationDatabase() - Expect(result).To(HaveLen(1)) - }) - - It("doesn't complain if you specify both database name and owner user for pg_basebackup", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{ - Database: "app", - Owner: "app", - }, - }, - }, - } - - result := cluster.validatePgBaseBackupApplicationDatabase() - Expect(result).To(BeEmpty()) - }) - - It("doesn't complain if we are not bootstrapping using pg_basebackup", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{}, - }, - } - result := recoveryCluster.validateBootstrapPgBaseBackupSource() - Expect(result).To(BeEmpty()) - }) - - It("complains when the source cluster doesn't exist", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{ - Source: "test", - }, - }, - }, - } - result := recoveryCluster.validateBootstrapPgBaseBackupSource() - Expect(result).ToNot(BeEmpty()) - }) -}) - -var _ = Describe("bootstrap recovery validation", func() { - It("complains if you specify the database name but not the owner for recovery", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - Database: "app", - }, - }, - }, - } - - result := cluster.validateRecoveryApplicationDatabase() - Expect(result).To(HaveLen(1)) - }) - - It("complains if you specify the owner but not the database name for recovery", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - Owner: "app", - }, - }, - }, - } - - result := cluster.validateRecoveryApplicationDatabase() - Expect(result).To(HaveLen(1)) - }) - - It("doesn't complain if you specify both database name and owner user for recovery", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - Database: "app", - Owner: "app", - }, - }, - }, - } - - result := cluster.validateRecoveryApplicationDatabase() - Expect(result).To(BeEmpty()) - }) - - It("does not complain when bootstrap recovery source matches one of the names of external clusters", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - Source: "test", - }, - }, - ExternalClusters: []ExternalCluster{ - { - Name: "test", - }, - }, - }, - } - errorsList := recoveryCluster.validateBootstrapRecoverySource() - Expect(errorsList).To(BeEmpty()) - }) - - It("complains when bootstrap recovery source does not match one of the names of external clusters", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - Source: "test", - }, - }, - ExternalClusters: []ExternalCluster{ - { - Name: "another-test", - }, - }, - }, - } - errorsList := recoveryCluster.validateBootstrapRecoverySource() - Expect(errorsList).ToNot(BeEmpty()) - }) -}) - -var _ = Describe("toleration validation", func() { - It("doesn't complain if we provide a proper toleration", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{ - Tolerations: []corev1.Toleration{ - { - Key: "test", - Operator: "Exists", - Effect: "NoSchedule", - }, - }, - },
- }, - } - result := recoveryCluster.validateTolerations() - Expect(result).To(BeEmpty()) - }) - - It("complains when the toleration is not valid", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{ - Tolerations: []corev1.Toleration{ - { - Key: "", - Operator: "Equal", - Effect: "NoSchedule", - }, - }, - }, - }, - } - result := recoveryCluster.validateTolerations() - Expect(result).ToNot(BeEmpty()) - }) -}) - -var _ = Describe("validate anti-affinity", func() { - t := true - f := false - It("doesn't complain if we provide an empty affinity section", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{}, - }, - } - result := cluster.validateAntiAffinity() - Expect(result).To(BeEmpty()) - }) - It("doesn't complain if we provide a proper required PodAntiAffinity with anti-affinity enabled", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{ - EnablePodAntiAffinity: &t, - PodAntiAffinityType: "required", - }, - }, - } - result := cluster.validateAntiAffinity() - Expect(result).To(BeEmpty()) - }) - - It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity disabled", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{ - EnablePodAntiAffinity: &f, - PodAntiAffinityType: "required", - }, - }, - } - result := recoveryCluster.validateAntiAffinity() - Expect(result).To(BeEmpty()) - }) - - It("doesn't complain if we provide a proper preferred PodAntiAffinity with anti-affinity enabled", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{ - EnablePodAntiAffinity: &t, - PodAntiAffinityType: "preferred", - }, - }, - } - result := recoveryCluster.validateAntiAffinity() - Expect(result).To(BeEmpty()) - }) - It("doesn't complain if we provide a proper PodAntiAffinity default with anti-affinity enabled", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{ - EnablePodAntiAffinity: &t, - PodAntiAffinityType: "", - }, - }, - } - result := recoveryCluster.validateAntiAffinity() - Expect(result).To(BeEmpty()) - }) - - It("complains if we provide a wrong PodAntiAffinity with anti-affinity disabled", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{ - EnablePodAntiAffinity: &f, - PodAntiAffinityType: "error", - }, - }, - } - result := recoveryCluster.validateAntiAffinity() - Expect(result).NotTo(BeEmpty()) - }) - - It("complains if we provide a wrong PodAntiAffinity with anti-affinity enabled", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{ - EnablePodAntiAffinity: &t, - PodAntiAffinityType: "error", - }, - }, - } - result := recoveryCluster.validateAntiAffinity() - Expect(result).NotTo(BeEmpty()) - }) -}) - -var _ = Describe("validation of the list of external clusters", func() { - It("is correct when it's empty", func() { - cluster := Cluster{} - Expect(cluster.validateExternalClusters()).To(BeEmpty()) - }) - - It("complains when the list of servers contains duplicates", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ExternalClusters: []ExternalCluster{ - { - Name: "one", - ConnectionParameters: map[string]string{}, - }, - { - Name: "one", - ConnectionParameters: map[string]string{}, - }, - }, - }, - } - Expect(cluster.validateExternalClusters()).ToNot(BeEmpty()) - }) - - It("should not raise errors if the server name is unique", func() { - cluster := Cluster{ - Spec:
ClusterSpec{ - ExternalClusters: []ExternalCluster{ - { - Name: "one", - ConnectionParameters: map[string]string{}, - }, - { - Name: "two", - ConnectionParameters: map[string]string{}, - }, - }, - }, - } - Expect(cluster.validateExternalClusters()).To(BeEmpty()) - }) -}) - -var _ = Describe("bootstrap base backup validation", func() { - It("complains when the source cluster doesn't exist", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{ - Source: "test", - }, - }, - }, - } - result := recoveryCluster.validateBootstrapPgBaseBackupSource() - Expect(result).ToNot(BeEmpty()) - }) -}) - -var _ = Describe("unix permissions identifiers change validation", func() { - It("complains if the PostgresGID is changed", func() { - oldCluster := &Cluster{ - Spec: ClusterSpec{ - PostgresGID: defaultPostgresGID, - }, - } - cluster := &Cluster{ - Spec: ClusterSpec{ - PostgresGID: 53, - }, - } - Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).NotTo(BeEmpty()) - }) - - It("complains if the PostgresUID is changed", func() { - oldCluster := &Cluster{ - Spec: ClusterSpec{ - PostgresUID: defaultPostgresUID, - }, - } - cluster := &Cluster{ - Spec: ClusterSpec{ - PostgresUID: 74, - }, - } - Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).NotTo(BeEmpty()) - }) - - It("should not complain if the values haven't been changed", func() { - oldCluster := &Cluster{ - Spec: ClusterSpec{ - PostgresUID: 74, - PostgresGID: 76, - }, - } - cluster := &Cluster{ - Spec: ClusterSpec{ - PostgresUID: 74, - PostgresGID: 76, - }, - } - Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).To(BeEmpty()) - }) -}) - -var _ = Describe("promotion token validation", func() { - It("complains if the replica token is not formatted in base64", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ - Enabled: ptr.To(false), - Source: "test", - PromotionToken: "this-is-a-wrong-token", - }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, - }, - ExternalClusters: []ExternalCluster{ - { - Name: "test", - }, - }, - }, - } - - result := cluster.validatePromotionToken() - Expect(result).ToNot(BeEmpty()) - }) - - It("complains if the replica token is not valid", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ - Enabled: ptr.To(false), - Source: "test", - PromotionToken: base64.StdEncoding.EncodeToString([]byte("{}")), - }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, - }, - ExternalClusters: []ExternalCluster{ - { - Name: "test", - }, - }, - }, - } - - result := cluster.validatePromotionToken() - Expect(result).ToNot(BeEmpty()) - }) - - It("doesn't complain if the replica token is valid", func() { - tokenContent := utils.PgControldataTokenContent{ - LatestCheckpointTimelineID: "3", - REDOWALFile: "this-wal-file", - DatabaseSystemIdentifier: "231231212", - LatestCheckpointREDOLocation: "33322232", - TimeOfLatestCheckpoint: "we don't know", - OperatorVersion: "version info", - } - jsonToken, err := json.Marshal(tokenContent) - Expect(err).ToNot(HaveOccurred()) - - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ - Enabled: ptr.To(false), - Source: "test", - PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), - }, -
Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, - }, - ExternalClusters: []ExternalCluster{ - { - Name: "test", - }, - }, - }, - } - - result := cluster.validatePromotionToken() - Expect(result).To(BeEmpty()) - }) - - It("complains if the token is set on a replica cluster (enabled)", func() { - tokenContent := utils.PgControldataTokenContent{ - LatestCheckpointTimelineID: "1", - REDOWALFile: "0000000100000001000000A1", - DatabaseSystemIdentifier: "231231212", - LatestCheckpointREDOLocation: "0/1000000", - TimeOfLatestCheckpoint: "we don't know", - OperatorVersion: "version info", - } - jsonToken, err := json.Marshal(tokenContent) - Expect(err).ToNot(HaveOccurred()) - - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ - Enabled: ptr.To(true), - Source: "test", - PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), - }, - }, - } - - result := cluster.validatePromotionToken() - Expect(result).NotTo(BeEmpty()) - }) - - It("complains if the token is set on a replica cluster (primary, default name)", func() { - tokenContent := utils.PgControldataTokenContent{ - LatestCheckpointTimelineID: "1", - REDOWALFile: "0000000100000001000000A1", - DatabaseSystemIdentifier: "231231212", - LatestCheckpointREDOLocation: "0/1000000", - TimeOfLatestCheckpoint: "we don't know", - OperatorVersion: "version info", - } - jsonToken, err := json.Marshal(tokenContent) - Expect(err).ToNot(HaveOccurred()) - - cluster := &Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test2", - }, - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ - Primary: "test", - Source: "test", - PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), - }, - }, - } - - result := cluster.validatePromotionToken() - Expect(result).NotTo(BeEmpty()) - }) - - It("complains if the token is set on a replica cluster (primary, self)", func() { - tokenContent := utils.PgControldataTokenContent{ - LatestCheckpointTimelineID: "1", - REDOWALFile: "0000000100000001000000A1", - DatabaseSystemIdentifier: "231231212", - LatestCheckpointREDOLocation: "0/1000000", - TimeOfLatestCheckpoint: "we don't know", - OperatorVersion: "version info", - } - jsonToken, err := json.Marshal(tokenContent) - Expect(err).ToNot(HaveOccurred()) - - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ - Primary: "test", - Self: "test2", - Source: "test", - PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), - }, - }, - } - - result := cluster.validatePromotionToken() - Expect(result).NotTo(BeEmpty()) - }) - - It("complains if the token is set when minApplyDelay is being used", func() { - tokenContent := utils.PgControldataTokenContent{ - LatestCheckpointTimelineID: "1", - REDOWALFile: "0000000100000001000000A1", - DatabaseSystemIdentifier: "231231212", - LatestCheckpointREDOLocation: "0/1000000", - TimeOfLatestCheckpoint: "we don't know", - OperatorVersion: "version info", - } - jsonToken, err := json.Marshal(tokenContent) - Expect(err).ToNot(HaveOccurred()) - - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ - Primary: "test", - Self: "test", - Source: "test", - PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), - MinApplyDelay: &metav1.Duration{ - Duration: 1 * time.Hour, - }, - }, - }, - } - - result := cluster.validatePromotionToken() - Expect(result).NotTo(BeEmpty()) - }) -}) - -var _ = Describe("replica mode validation", func() { - It("complains if the bootstrap method is not specified",
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(true),
-					Source:  "test",
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-		Expect(cluster.validateReplicaMode()).ToNot(BeEmpty())
-	})
-
-	It("complains if the initdb bootstrap method is used", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(true),
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-		Expect(cluster.validateReplicaMode()).ToNot(BeEmpty())
-	})
-
-	It("doesn't complain about initdb if we enable the external cluster on an existing cluster", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				ResourceVersion: "existing",
-			},
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(true),
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-		result := cluster.validateReplicaMode()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("should complain if enabled is set to off during a transition", func() {
-		old := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				ResourceVersion: "existing",
-			},
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(true),
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-			Status: ClusterStatus{
-				SwitchReplicaClusterStatus: SwitchReplicaClusterStatus{
-					InProgress: true,
-				},
-			},
-		}
-
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				ResourceVersion: "existing",
-			},
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(false),
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-			Status: ClusterStatus{
-				SwitchReplicaClusterStatus: SwitchReplicaClusterStatus{
-					InProgress: true,
-				},
-			},
-		}
-
-		result := cluster.validateReplicaClusterChange(old)
-		Expect(result).To(HaveLen(1))
-		Expect(result[0].Type).To(Equal(field.ErrorTypeForbidden))
-		Expect(result[0].Field).To(Equal("spec.replica.enabled"))
-	})
-
-	It("is valid when the pg_basebackup bootstrap option is used", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(true),
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					PgBaseBackup: &BootstrapPgBaseBackup{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-		result := cluster.validateReplicaMode()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("is valid when the restore bootstrap option is used", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(true),
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					Recovery: &BootstrapRecovery{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-		result := cluster.validateReplicaMode()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("complains when the primary field is used with the enabled field", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(true),
-					Primary: "toast",
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					PgBaseBackup: &BootstrapPgBaseBackup{},
-				},
-				ExternalClusters: []ExternalCluster{},
-			},
-		}
-		result := cluster.validateReplicaMode()
-		Expect(result).ToNot(BeEmpty())
-	})
-
-	It("doesn't complain when the enabled field is not specified", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "test-2",
-			},
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Primary: "test",
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					PgBaseBackup: &BootstrapPgBaseBackup{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-		result := cluster.validateReplicaMode()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("doesn't complain when creating a new primary cluster with the replication stanza set", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "test",
-			},
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Primary: "test",
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-		result := cluster.validateReplicaMode()
-		Expect(result).To(BeEmpty())
-	})
-})
-
-var _ = Describe("validate the replica cluster external clusters", func() {
-	It("complains when the external cluster doesn't exist (source)", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Enabled: ptr.To(true),
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					PgBaseBackup: &BootstrapPgBaseBackup{},
-				},
-				ExternalClusters: []ExternalCluster{},
-			},
-		}
-
-		cluster.Spec.Bootstrap.PgBaseBackup = nil
-		result := cluster.validateReplicaClusterExternalClusters()
-		Expect(result).ToNot(BeEmpty())
-	})
-
-	It("complains when the external cluster doesn't exist (primary)", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Primary: "test2",
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					PgBaseBackup: &BootstrapPgBaseBackup{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-
-		result := cluster.validateReplicaClusterExternalClusters()
-		Expect(result).ToNot(BeEmpty())
-	})
-
-	It("complains when the external cluster doesn't exist (self)", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicaCluster: &ReplicaClusterConfiguration{
-					Self:    "test2",
-					Primary: "test",
-					Source:  "test",
-				},
-				Bootstrap: &BootstrapConfiguration{
-					PgBaseBackup: &BootstrapPgBaseBackup{},
-				},
-				ExternalClusters: []ExternalCluster{
-					{
-						Name: "test",
-					},
-				},
-			},
-		}
-
-		result := cluster.validateReplicaClusterExternalClusters()
-		Expect(result).ToNot(BeEmpty())
-	})
-})
-
-var _ = Describe("Validation changes", func() {
-	It("doesn't complain if given old cluster is nil", func() {
-		newCluster := &Cluster{}
-		err := newCluster.ValidateChanges(nil)
-		Expect(err).To(BeNil())
-	})
-})
-
-var _ = Describe("Backup validation", func() {
-	It("complain if there's no credentials", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Backup: &BackupConfiguration{
-					BarmanObjectStore: &BarmanObjectStoreConfiguration{},
-				},
-			},
-		}
-		err := cluster.validateBackupConfiguration()
-		Expect(err).To(HaveLen(1))
-	})
-})
-
-var _ = Describe("Backup retention policy validation", func() {
-	It("doesn't complain if given policy is not provided", func() {
not provided", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{}, - }, - } - err := cluster.validateRetentionPolicy() - Expect(err).To(BeEmpty()) - }) - - It("doesn't complain if given policy is valid", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{ - RetentionPolicy: "90d", - }, - }, - } - err := cluster.validateRetentionPolicy() - Expect(err).To(BeEmpty()) - }) - - It("complain if a given policy is not valid", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{ - RetentionPolicy: "09", - }, - }, - } - err := cluster.validateRetentionPolicy() - Expect(err).To(HaveLen(1)) - }) -}) - -var _ = Describe("Default monitoring queries", func() { - It("correctly set the default monitoring queries configmap and secret when none is already specified", func() { - cluster := &Cluster{} - cluster.defaultMonitoringQueries(&configuration.Data{ - MonitoringQueriesSecret: "test-secret", - MonitoringQueriesConfigmap: "test-configmap", - }) - Expect(cluster.Spec.Monitoring).NotTo(BeNil()) - Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) - Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap). - To(ContainElement(ConfigMapKeySelector{ - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: DefaultMonitoringKey, - })) - Expect(cluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) - Expect(cluster.Spec.Monitoring.CustomQueriesSecret). - To(ContainElement(SecretKeySelector{ - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, - Key: DefaultMonitoringKey, - })) - }) - testCluster := &Cluster{Spec: ClusterSpec{Monitoring: &MonitoringConfiguration{ - CustomQueriesConfigMap: []ConfigMapKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: "test2", - }, - }, - CustomQueriesSecret: []SecretKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: "test3", - }, - }, - }}} - It("correctly set the default monitoring queries configmap when other metrics are already specified", func() { - modifiedCluster := testCluster.DeepCopy() - modifiedCluster.defaultMonitoringQueries(&configuration.Data{ - MonitoringQueriesConfigmap: "test-configmap", - }) - - Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). - To(ContainElement(ConfigMapKeySelector{ - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: "test2", - })) - - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). - To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesSecret)) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). 
-			To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesConfigMap))
-	})
-	It("correctly set the default monitoring queries secret when other metrics are already specified", func() {
-		modifiedCluster := testCluster.DeepCopy()
-		modifiedCluster.defaultMonitoringQueries(&configuration.Data{
-			MonitoringQueriesSecret: "test-secret",
-		})
-
-		Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil())
-		Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
-		Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
-		Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
-			To(ContainElement(SecretKeySelector{
-				LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
-				Key:                  "test3",
-			}))
-
-		Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
-			To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesConfigMap))
-		Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
-			To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesSecret))
-	})
-})
-
-var _ = Describe("validation of imports", func() {
-	It("rejects unrecognized import type", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type: "fooBar",
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(HaveLen(1))
-	})
-
-	It("rejects microservice import with roles", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MicroserviceSnapshotType,
-							Databases: []string{"foo"},
-							Roles:     []string{"bar"},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(HaveLen(1))
-	})
-
-	It("rejects microservice import without exactly one database", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MicroserviceSnapshotType,
-							Databases: []string{"foo", "bar"},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(HaveLen(1))
-	})
-
-	It("rejects microservice import with a wildcard on the database name", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MicroserviceSnapshotType,
-							Databases: []string{"*foo"},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(HaveLen(1))
-	})
-
-	It("accepts microservice import when well specified", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MicroserviceSnapshotType,
-							Databases: []string{"foo"},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("rejects monolith import with no databases", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MonolithSnapshotType,
-							Databases: []string{},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(HaveLen(1))
-	})
-
-	It("rejects monolith import with PostImport Application SQL", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:                     MonolithSnapshotType,
-							Databases:                []string{"foo"},
-							PostImportApplicationSQL: []string{"select * from bar"},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(HaveLen(1))
-	})
-
-	It("rejects monolith import with wildcards alongside specific values", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MonolithSnapshotType,
-							Databases: []string{"bar", "*"},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(HaveLen(1))
-
-		cluster = &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MonolithSnapshotType,
-							Databases: []string{"foo"},
-							Roles:     []string{"baz", "*"},
-						},
-					},
-				},
-			},
-		}
-
-		result = cluster.validateImport()
-		Expect(result).To(HaveLen(1))
-	})
-
-	It("accepts monolith import with proper values", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MonolithSnapshotType,
-							Databases: []string{"foo"},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("accepts monolith import with wildcards", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					InitDB: &BootstrapInitDB{
-						Database: "app",
-						Owner:    "app",
-						Import: &Import{
-							Type:      MonolithSnapshotType,
-							Databases: []string{"*"},
-							Roles:     []string{"*"},
-						},
-					},
-				},
-			},
-		}
-
-		result := cluster.validateImport()
-		Expect(result).To(BeEmpty())
-	})
-})
-
-var _ = Describe("validation of replication slots configuration", func() {
-	It("prevents using replication slots on PostgreSQL 10 and older", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: "ghcr.io/cloudnative-pg/postgresql:10.5",
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled: ptr.To(true),
-					},
-					UpdateInterval: 0,
-				},
-			},
-		}
-		cluster.Default()
-
-		result := cluster.validateReplicationSlots()
-		Expect(result).To(HaveLen(1))
-	})
-
-	It("can be enabled on the default PostgreSQL image", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled: ptr.To(true),
-					},
-					UpdateInterval: 0,
-				},
-			},
-		}
-		cluster.Default()
-
-		result := cluster.validateReplicationSlots()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("set replicationSlots by default", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-			},
-		}
-		cluster.Default()
-		Expect(cluster.Spec.ReplicationSlots).ToNot(BeNil())
-		Expect(cluster.Spec.ReplicationSlots.HighAvailability).ToNot(BeNil())
-		Expect(cluster.Spec.ReplicationSlots.HighAvailability.Enabled).To(HaveValue(BeTrue()))
-
-		result := cluster.validateReplicationSlots()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("set replicationSlots.highAvailability by default", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					UpdateInterval: 30,
-				},
-			},
-		}
-		cluster.Default()
-		Expect(cluster.Spec.ReplicationSlots.HighAvailability).ToNot(BeNil())
-		Expect(cluster.Spec.ReplicationSlots.HighAvailability.Enabled).To(HaveValue(BeTrue()))
-
-		result := cluster.validateReplicationSlots()
-		Expect(result).To(BeEmpty())
-	})
-
-	It("allows enabling replication slots on the fly", func() {
-		oldCluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled: ptr.To(false),
-					},
-				},
-			},
-		}
-		oldCluster.Default()
-
-		newCluster := oldCluster.DeepCopy()
-		newCluster.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{
-			HighAvailability: &ReplicationSlotsHAConfiguration{
-				Enabled:    ptr.To(true),
-				SlotPrefix: "_test_",
-			},
-		}
-
-		Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(BeEmpty())
-	})
-
-	It("prevents changing the slot prefix while replication slots are enabled", func() {
-		oldCluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled:    ptr.To(true),
-						SlotPrefix: "_test_",
-					},
-				},
-			},
-		}
-		oldCluster.Default()
-
-		newCluster := oldCluster.DeepCopy()
-		newCluster.Spec.ReplicationSlots.HighAvailability.SlotPrefix = "_toast_"
-		Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(HaveLen(1))
-	})
-
-	It("prevents removing the replication slot section when replication slots are enabled", func() {
-		oldCluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled:    ptr.To(true),
-						SlotPrefix: "_test_",
-					},
-				},
-			},
-		}
-		oldCluster.Default()
-
-		newCluster := oldCluster.DeepCopy()
-		newCluster.Spec.ReplicationSlots = nil
-		Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(HaveLen(1))
-	})
-
-	It("allows disabling the replication slots", func() {
-		oldCluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled:    ptr.To(true),
-						SlotPrefix: "_test_",
-					},
-				},
-			},
-		}
-		oldCluster.Default()
-
-		newCluster := oldCluster.DeepCopy()
-		newCluster.Spec.ReplicationSlots.HighAvailability.Enabled = ptr.To(false)
-		Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(BeEmpty())
-	})
-
-	It("should return an error when SynchronizeReplicasConfiguration is not nil and has invalid regex", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					SynchronizeReplicas: &SynchronizeReplicasConfiguration{
-						ExcludePatterns: []string{"([a-zA-Z]+"},
-					},
-				},
-			},
-		}
-		errors := cluster.validateReplicationSlots()
-		Expect(errors).To(HaveLen(1))
-		Expect(errors[0].Detail).To(Equal("Cannot configure synchronizeReplicas. Invalid regexes were found"))
-	})
-
-	It("should not return an error when SynchronizeReplicasConfiguration is not nil and regex is valid", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					SynchronizeReplicas: &SynchronizeReplicasConfiguration{
-						ExcludePatterns: []string{"validpattern"},
-					},
-				},
-			},
-		}
-		errors := cluster.validateReplicationSlots()
-		Expect(errors).To(BeEmpty())
-	})
-
-	It("should not return an error when SynchronizeReplicasConfiguration is nil", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ImageName: versions.DefaultImageName,
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					SynchronizeReplicas: nil,
-				},
-			},
-		}
-		errors := cluster.validateReplicationSlots()
-		Expect(errors).To(BeEmpty())
-	})
-})
-
-var _ = Describe("Environment variables validation", func() {
-	When("an environment variable is given", func() {
-		It("detects if it is valid", func() {
-			Expect(isReservedEnvironmentVariable("PGDATA")).To(BeTrue())
-		})
-
-		It("detects if it is not valid", func() {
-			Expect(isReservedEnvironmentVariable("LC_ALL")).To(BeFalse())
-		})
-	})
-
-	When("a ClusterSpec is given", func() {
-		It("detects if the environment variable list is correct", func() {
-			cluster := Cluster{
-				Spec: ClusterSpec{
-					Env: []corev1.EnvVar{
-						{
-							Name:  "TZ",
-							Value: "Europe/Rome",
-						},
-					},
-				},
-			}
-
-			Expect(cluster.validateEnv()).To(BeEmpty())
-		})
-
-		It("detects if the environment variable list contains a reserved variable", func() {
-			cluster := Cluster{
-				Spec: ClusterSpec{
-					Env: []corev1.EnvVar{
-						{
-							Name:  "TZ",
-							Value: "Europe/Rome",
-						},
-						{
-							Name:  "PGDATA",
-							Value: "/tmp",
-						},
-					},
-				},
-			}
-
-			Expect(cluster.validateEnv()).To(HaveLen(1))
-		})
-	})
-})
-
-var _ = Describe("Storage configuration validation", func() {
-	When("a ClusterSpec is given", func() {
-		It("produces one error if storage is not set at all", func() {
-			cluster := Cluster{
-				Spec: ClusterSpec{
-					StorageConfiguration: StorageConfiguration{},
-				},
-			}
-			Expect(cluster.validateStorageSize()).To(HaveLen(1))
-		})
-
-		It("succeeds if storage size is set", func() {
-			cluster := Cluster{
-				Spec: ClusterSpec{
-					StorageConfiguration: StorageConfiguration{
-						Size: "1G",
-					},
-				},
-			}
-			Expect(cluster.validateStorageSize()).To(BeEmpty())
-		})
-
-		It("succeeds if storage is not set but a pvc template specifies storage", func() {
-			cluster := Cluster{
-				Spec: ClusterSpec{
-					StorageConfiguration: StorageConfiguration{
-						PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
-							Resources: corev1.VolumeResourceRequirements{
-								Requests: corev1.ResourceList{"storage": resource.MustParse("1Gi")},
-							},
-						},
-					},
-				},
-			}
-			Expect(cluster.validateStorageSize()).To(BeEmpty())
-		})
-	})
-})
-
-var _ = Describe("Ephemeral volume configuration validation", func() {
-	It("succeeds if no ephemeral configuration is present", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{},
-		}
-		Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty())
-	})
-
-	It("succeeds if ephemeralVolumeSource is set", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
-			},
-		}
-		Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty())
-	})
-
-	It("succeeds if ephemeralVolumesSizeLimit.temporaryData is set", func() {
-		onegi := resource.MustParse("1Gi")
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{
-					TemporaryData: &onegi,
-				},
-			},
-		}
-		Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty())
-	})
-
-	It("succeeds if ephemeralVolumeSource and ephemeralVolumesSizeLimit.shm are set", func() {
-		onegi := resource.MustParse("1Gi")
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
-				EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{
-					Shm: &onegi,
-				},
-			},
-		}
-		Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty())
-	})
-
-	It("produces one error if conflicting ephemeral storage options are set", func() {
-		onegi := resource.MustParse("1Gi")
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
-				EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{
-					TemporaryData: &onegi,
-				},
-			},
-		}
-		Expect(cluster.validateEphemeralVolumeSource()).To(HaveLen(1))
-	})
-})
-
-var _ = Describe("Role management validation", func() {
-	It("should succeed if there is no management stanza", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{},
-		}
-		Expect(cluster.validateManagedRoles()).To(BeEmpty())
-	})
-
-	It("should succeed if the role defined is not reserved", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Managed: &ManagedConfiguration{
-					Roles: []RoleConfiguration{
-						{
-							Name: "non-conflicting",
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateManagedRoles()).To(BeEmpty())
-	})
-
-	It("should produce an error on invalid connection limit", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Managed: &ManagedConfiguration{
-					Roles: []RoleConfiguration{
-						{
-							Name:            "non-conflicting",
-							ConnectionLimit: -3,
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateManagedRoles()).To(HaveLen(1))
-	})
-
-	It("should produce an error if the role is reserved", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Managed: &ManagedConfiguration{
-					Roles: []RoleConfiguration{
-						{
-							Name: "postgres",
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateManagedRoles()).To(HaveLen(1))
-	})
-
-	It("should produce two errors if the role is reserved and the connection limit is invalid", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Managed: &ManagedConfiguration{
-					Roles: []RoleConfiguration{
-						{
-							Name:            "postgres",
-							ConnectionLimit: -3,
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateManagedRoles()).To(HaveLen(2))
-	})
-
-	It("should produce an error if we define two roles with the same name", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Managed: &ManagedConfiguration{
-					Roles: []RoleConfiguration{
-						{
-							Name:            "my_test",
-							ConnectionLimit: -1,
-						},
-						{
-							Name:            "my_test",
-							Superuser:       true,
-							BypassRLS:       true,
-							ConnectionLimit: -1,
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateManagedRoles()).To(HaveLen(1))
-	})
-	It("should produce an error if we have a password secret AND DisablePassword in a role", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Managed: &ManagedConfiguration{
-					Roles: []RoleConfiguration{
-						{
-							Name:            "my_test",
-							Superuser:       true,
-							BypassRLS:       true,
-							DisablePassword: true,
-							PasswordSecret: &LocalObjectReference{
-								Name: "myPassword",
-							},
-							ConnectionLimit: -1,
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateManagedRoles()).To(HaveLen(1))
-	})
-})
-
-var _ = Describe("Managed Extensions validation", func() {
-	It("should succeed if no extension is enabled", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{},
-		}
-		Expect(cluster.validateManagedExtensions()).To(BeEmpty())
-	})
-
-	It("should fail if hot_standby_feedback is set to an invalid value", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled: ptr.To(true),
-					},
-				},
-				PostgresConfiguration: PostgresConfiguration{
-					Parameters: map[string]string{
-						"hot_standby_feedback":                     "foo",
-						"pg_failover_slots.synchronize_slot_names": "my_slot",
-					},
-				},
-			},
-		}
-		Expect(cluster.validatePgFailoverSlots()).To(HaveLen(2))
-	})
-
-	It("should succeed if pg_failover_slots and its prerequisites are enabled", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled: ptr.To(true),
-					},
-				},
-				PostgresConfiguration: PostgresConfiguration{
-					Parameters: map[string]string{
-						"hot_standby_feedback":                     "on",
-						"pg_failover_slots.synchronize_slot_names": "my_slot",
-					},
-				},
-			},
-		}
-		Expect(cluster.validatePgFailoverSlots()).To(BeEmpty())
-	})
-
-	It("should produce two errors if pg_failover_slots is enabled and its prerequisites are disabled", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				PostgresConfiguration: PostgresConfiguration{
-					Parameters: map[string]string{
-						"pg_failover_slots.synchronize_slot_names": "my_slot",
-					},
-				},
-			},
-		}
-		Expect(cluster.validatePgFailoverSlots()).To(HaveLen(2))
-	})
-
-	It("should produce an error if pg_failover_slots is enabled and HA slots are disabled", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				PostgresConfiguration: PostgresConfiguration{
-					Parameters: map[string]string{
-						"hot_standby_feedback":                     "yes",
-						"pg_failover_slots.synchronize_slot_names": "my_slot",
-					},
-				},
-			},
-		}
-		Expect(cluster.validatePgFailoverSlots()).To(HaveLen(1))
-	})
-
-	It("should produce an error if pg_failover_slots is enabled and hot_standby_feedback is disabled", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				ReplicationSlots: &ReplicationSlotsConfiguration{
-					HighAvailability: &ReplicationSlotsHAConfiguration{
-						Enabled: ptr.To(true),
-					},
-				},
-				PostgresConfiguration: PostgresConfiguration{
-					Parameters: map[string]string{
-						"pg_failover_slots.synchronize_slot_names": "my_slot",
-					},
-				},
-			},
-		}
-		Expect(cluster.validatePgFailoverSlots()).To(HaveLen(1))
-	})
-})
-
-var _ = Describe("Recovery from volume snapshot validation", func() {
-	clusterFromRecovery := func(recovery *BootstrapRecovery) *Cluster {
-		return &Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					Recovery: recovery,
-				},
-				WalStorage: &StorageConfiguration{},
-			},
-		}
-	}
-
-	It("should produce an error when defining two recovery sources at the same time", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					Recovery: &BootstrapRecovery{
-						Source:          "sourceName",
-						Backup:          &BackupSource{},
-						VolumeSnapshots: &DataSource{},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1))
-	})
-
-	It("should produce an error when defining a backupID while recovering using a DataSource", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					Recovery: &BootstrapRecovery{
-						RecoveryTarget: &RecoveryTarget{
-							BackupID: "20220616T031500",
-						},
-						VolumeSnapshots: &DataSource{
-							Storage: corev1.TypedLocalObjectReference{
-								APIGroup: ptr.To(""),
-								Kind:     "PersistentVolumeClaim",
-								Name:     "pgdata",
-							},
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1))
-	})
-
-	It("should produce an error when asking to recovery WALs from a snapshot without having storage for it", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					Recovery: &BootstrapRecovery{
-						VolumeSnapshots: &DataSource{
-							Storage: corev1.TypedLocalObjectReference{
-								APIGroup: ptr.To(storagesnapshotv1.GroupName),
-								Kind:     "VolumeSnapshot",
-								Name:     "pgdata",
-							},
-							WalStorage: &corev1.TypedLocalObjectReference{
-								APIGroup: ptr.To(storagesnapshotv1.GroupName),
-								Kind:     "VolumeSnapshot",
-								Name:     "pgwal",
-							},
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1))
-	})
-
-	It("should not produce an error when the configuration is sound", func() {
-		cluster := Cluster{
-			Spec: ClusterSpec{
-				Bootstrap: &BootstrapConfiguration{
-					Recovery: &BootstrapRecovery{
-						VolumeSnapshots: &DataSource{
-							Storage: corev1.TypedLocalObjectReference{
-								APIGroup: ptr.To(storagesnapshotv1.GroupName),
-								Kind:     "VolumeSnapshot",
-								Name:     "pgdata",
-							},
-							WalStorage: &corev1.TypedLocalObjectReference{
-								APIGroup: ptr.To(storagesnapshotv1.GroupName),
-								Kind:     "VolumeSnapshot",
-								Name:     "pgwal",
-							},
-						},
-					},
-				},
-				WalStorage: &StorageConfiguration{},
-			},
-		}
-		Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
-	})
-
-	It("accepts recovery from a VolumeSnapshot", func() {
-		cluster := clusterFromRecovery(&BootstrapRecovery{
-			VolumeSnapshots: &DataSource{
-				Storage: corev1.TypedLocalObjectReference{
-					APIGroup: ptr.To(storagesnapshotv1.GroupName),
-					Kind:     VolumeSnapshotKind,
-					Name:     "pgdata",
-				},
-				WalStorage: &corev1.TypedLocalObjectReference{
-					APIGroup: ptr.To(storagesnapshotv1.GroupName),
-					Kind:     VolumeSnapshotKind,
-					Name:     "pgwal",
-				},
-			},
-		})
-		Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
-	})
-
-	It("accepts recovery from a VolumeSnapshot, while restoring WALs from an object store", func() {
-		cluster := clusterFromRecovery(&BootstrapRecovery{
-			VolumeSnapshots: &DataSource{
-				Storage: corev1.TypedLocalObjectReference{
-					APIGroup: ptr.To(storagesnapshotv1.GroupName),
-					Kind:     VolumeSnapshotKind,
-					Name:     "pgdata",
-				},
-			},
-
-			Source: "pg-cluster",
-		})
-		Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
-	})
-
-	When("using an nil apiGroup", func() {
-		It("accepts recovery from a PersistentVolumeClaim", func() {
-			cluster := clusterFromRecovery(&BootstrapRecovery{
-				VolumeSnapshots: &DataSource{
-					Storage: corev1.TypedLocalObjectReference{
-						APIGroup: ptr.To(storagesnapshotv1.GroupName),
-						Kind:     "VolumeSnapshot",
-						Name:     "pgdata",
-					},
-					WalStorage: &corev1.TypedLocalObjectReference{
-						APIGroup: ptr.To(storagesnapshotv1.GroupName),
-						Kind:     "VolumeSnapshot",
-						Name:     "pgwal",
-					},
-				},
-			})
-			Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
-		})
-	})
-
-	When("using an empty apiGroup", func() {
-		It("accepts recovery from a PersistentVolumeClaim", func() {
-			cluster := clusterFromRecovery(&BootstrapRecovery{
-				VolumeSnapshots: &DataSource{
-					Storage: corev1.TypedLocalObjectReference{
-						APIGroup: ptr.To(storagesnapshotv1.GroupName),
-						Kind:     "VolumeSnapshot",
-						Name:     "pgdata",
-					},
-					WalStorage: &corev1.TypedLocalObjectReference{
-						APIGroup: ptr.To(storagesnapshotv1.GroupName),
-						Kind:     "VolumeSnapshot",
-						Name:     "pgwal",
-					},
-				},
-			})
-			Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
-		})
-	})
-
-	It("prevent recovery from other Objects", func() {
-		cluster := clusterFromRecovery(&BootstrapRecovery{
-			VolumeSnapshots: &DataSource{
-				Storage: corev1.TypedLocalObjectReference{
-					APIGroup: ptr.To(""),
-					Kind:     "Secret",
-					Name:     "pgdata",
-				},
-				WalStorage: &corev1.TypedLocalObjectReference{
-					APIGroup: ptr.To(""),
-					Kind:     "ConfigMap",
-					Name:     "pgwal",
-				},
-			},
-		})
-		Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(2))
-	})
-})
-
-var _ = Describe("validateResources", func() {
-	var cluster *Cluster
-
-	BeforeEach(func() {
-		cluster = &Cluster{
-			Spec: ClusterSpec{
-				PostgresConfiguration: PostgresConfiguration{
-					Parameters: map[string]string{},
-				},
-				Resources: corev1.ResourceRequirements{
-					Requests: map[corev1.ResourceName]resource.Quantity{},
-					Limits:   map[corev1.ResourceName]resource.Quantity{},
-				},
-			},
-		}
-	})
-
-	It("returns an error when the CPU request is greater than CPU limit", func() {
-		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("2")
-		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
-
-		errors := cluster.validateResources()
-		Expect(errors).To(HaveLen(1))
-		Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
-	})
-
-	It("returns an error when the Memory request is greater than Memory limit", func() {
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
-		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
-
-		errors := cluster.validateResources()
-		Expect(errors).To(HaveLen(1))
-		Expect(errors[0].Detail).To(Equal("Memory request is greater than the limit"))
-	})
-
-	It("returns no error when the ephemeral storage request is correctly set", func() {
-		cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("1")
-		cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
-
-		errors := cluster.validateResources()
-		Expect(errors).To(BeEmpty())
-	})
-
-	It("returns an error when the ephemeral storage request is greater than ephemeral storage limit", func() {
-		cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("2")
-		cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
-
-		errors := cluster.validateResources()
-		Expect(errors).To(HaveLen(1))
-		Expect(errors[0].Detail).To(Equal("Ephemeral storage request is greater than the limit"))
-	})
-
-	It("returns three errors when CPU, Memory, and ephemeral storage requests are greater than limits", func() {
-		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("2")
-		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
-		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
-		cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("2")
-		cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
-
-		errors := cluster.validateResources()
-		Expect(errors).To(HaveLen(3))
-		Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
-		Expect(errors[1].Detail).To(Equal("Memory request is greater than the limit"))
-		Expect(errors[2].Detail).To(Equal("Ephemeral storage request is greater than the limit"))
-	})
-
-	It("returns two errors when both CPU and Memory requests are greater than their limits", func() {
-		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("2")
-		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
-		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
-
-		errors := cluster.validateResources()
-		Expect(errors).To(HaveLen(2))
-		Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
-		Expect(errors[1].Detail).To(Equal("Memory request is greater than the limit"))
-	})
-
-	It("returns no errors when both CPU and Memory requests are less than or equal to their limits", func() {
-		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("1")
-		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("2")
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
-		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("2Gi")
-
-		errors := cluster.validateResources()
-		Expect(errors).To(BeEmpty())
-	})
-
-	It("returns no errors when CPU request is set but limit is nil", func() {
-		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("1")
-		errors := cluster.validateResources()
-		Expect(errors).To(BeEmpty())
-	})
-
-	It("returns no errors when CPU limit is set but request is nil", func() {
-		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
-		errors := cluster.validateResources()
-		Expect(errors).To(BeEmpty())
-	})
-
-	It("returns no errors when Memory request is set but limit is nil", func() {
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
-		errors := cluster.validateResources()
-		Expect(errors).To(BeEmpty())
-	})
-
-	It("returns no errors when Memory limit is set but request is nil", func() {
-		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
-		errors := cluster.validateResources()
-		Expect(errors).To(BeEmpty())
-	})
-
-	It("returns an error when memoryRequest is less than shared_buffers in kB", func() {
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
-		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000000kB"
-		errors := cluster.validateResources()
-		Expect(errors).To(HaveLen(1))
-		Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value"))
-	})
-
-	It("returns an error when memoryRequest is less than shared_buffers in MB", func() {
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1000Mi")
-		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000MB"
-		errors := cluster.validateResources()
-		Expect(errors).To(HaveLen(1))
-		Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value"))
-	})
-
-	It("returns no errors when memoryRequest is greater than or equal to shared_buffers in GB", func() {
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
-		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB"
-		errors := cluster.validateResources()
-		Expect(errors).To(BeEmpty())
-	})
-
-	It("returns no errors when shared_buffers is in a format that can't be parsed", func() {
-		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
-		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "invalid_value"
-		errors := cluster.validateResources()
-		Expect(errors).To(BeEmpty())
-	})
-})
-
-var _ = Describe("Tablespaces validation", func() {
-	createFakeTemporaryTbsConf := func(name string) TablespaceConfiguration {
-		return TablespaceConfiguration{
-			Name: name,
-			Storage: StorageConfiguration{
-				Size: "10Gi",
-			},
-		}
-	}
-
-	It("should succeed if there is no tablespaces section", func() {
-		cluster := Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-			},
-		}
-		Expect(cluster.Validate()).To(BeEmpty())
-	})
-
-	It("should succeed if the tablespaces are ok", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					createFakeTemporaryTbsConf("my_tablespace"),
-				},
-			},
-		}
-		Expect(cluster.Validate()).To(BeEmpty())
-	})
-
-	It("should produce an error if the tablespace name is too long", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					// each repetition is 14 char long, so 5x14 = 70 char > postgres limit
-					createFakeTemporaryTbsConf("my_tablespace1my_tablespace2my_tablespace3my_tablespace4my_tablespace5"),
-				},
-			},
-		}
-		Expect(cluster.Validate()).To(HaveLen(1))
-	})
-
-	It("should produce an error if the tablespace name is reserved by Postgres", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					createFakeTemporaryTbsConf("pg_foo"),
-				},
-			},
-		}
-		Expect(cluster.Validate()).To(HaveLen(1))
-	})
-
-	It("should produce an error if the tablespace name is not valid", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					// each repetition is 14 char long, so 5x14 = 70 char > postgres limit
-					createFakeTemporaryTbsConf("my-^&sdf;"),
-				},
-			},
-		}
-		Expect(cluster.Validate()).To(HaveLen(1))
-	})
-
-	It("should produce an error if there are duplicate tablespaces", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					createFakeTemporaryTbsConf("my_tablespace"),
-					createFakeTemporaryTbsConf("my_TAblespace"),
-					createFakeTemporaryTbsConf("another"),
-				},
-			},
-		}
-		Expect(cluster.Validate()).To(HaveLen(1))
-	})
-
-	It("should produce an error if the storage configured for the tablespace is invalid", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					// each repetition is 14 char long, so 5x14 = 70 char > postgres limit
-					{
-						Name: "my_tablespace1",
-						Storage: StorageConfiguration{
-							Size: "10Gibberish",
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.Validate()).To(HaveLen(1))
-	})
-
-	It("should produce two errors if two tablespaces have errors", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					// each repetition is 14 char long, so 5x14 = 70 char > postgres limit
-					{
-						Name: "my_tablespace1",
-						Storage: StorageConfiguration{
-							Size: "10Gibberish",
-						},
-					},
-					// each repetition is 14 char long, so 5x14 = 70 char > postgres limit
-					createFakeTemporaryTbsConf("my_tablespace1my_tablespace2my_tablespace3my_tablespace4my_tablespace5"),
-				},
-			},
-		}
-		Expect(cluster.Validate()).To(HaveLen(2))
-	})
-
-	It("should produce an error if the tablespaces section is deleted", func() {
-		oldCluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					createFakeTemporaryTbsConf("my-tablespace1"),
-				},
-			},
-		}
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-			},
-		}
-		Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1))
-	})
-
-	It("should produce an error if a tablespace is deleted", func() {
-		oldCluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					createFakeTemporaryTbsConf("my-tablespace1"),
-					createFakeTemporaryTbsConf("my-tablespace2"),
-				},
-			},
-		}
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					createFakeTemporaryTbsConf("my-tablespace1"),
-				},
-			},
-		}
-		Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1))
-	})
-
-	It("should produce an error if a tablespace is reduced in size", func() {
-		oldCluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					createFakeTemporaryTbsConf("my-tablespace1"),
-				},
-			},
-		}
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					{
-						Name: "my-tablespace1",
-						Storage: StorageConfiguration{
-							Size: "9Gi",
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1))
-	})
-
-	It("should not complain when the backup section refers to a tbs that is defined", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					{
-						Name: "my-tablespace1",
-						Storage: StorageConfiguration{
-							Size: "9Gi",
-						},
-					},
-				},
-				Backup: &BackupConfiguration{
-					VolumeSnapshot: &VolumeSnapshotConfiguration{
-						TablespaceClassName: map[string]string{
-							"my-tablespace1": "random",
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateTablespaceBackupSnapshot()).To(BeEmpty())
-	})
-
-	It("should complain when the backup section refers to a tbs that is not defined", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "cluster1",
-			},
-			Spec: ClusterSpec{
-				Instances: 3,
-				StorageConfiguration: StorageConfiguration{
-					Size: "10Gi",
-				},
-				Tablespaces: []TablespaceConfiguration{
-					{
-						Name: "my-tablespace1",
-						Storage: StorageConfiguration{
-							Size: "9Gi",
-						},
-					},
-				},
-				Backup: &BackupConfiguration{
-					VolumeSnapshot: &VolumeSnapshotConfiguration{
-						TablespaceClassName: map[string]string{
-							"not-present": "random",
-						},
-					},
-				},
-			},
-		}
-		Expect(cluster.validateTablespaceBackupSnapshot()).To(HaveLen(1))
-	})
-})
-
-var _ = Describe("Validate hibernation", func() {
-	It("should succeed if hibernation is set to 'on'", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Annotations: map[string]string{
-					utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOn),
-				},
-			},
-		}
-		Expect(cluster.validateHibernationAnnotation()).To(BeEmpty())
-	})
-
-	It("should succeed if hibernation is set to 'off'", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Annotations: map[string]string{
-					utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOff),
-				},
-			},
-		}
-		Expect(cluster.validateHibernationAnnotation()).To(BeEmpty())
-	})
-
-	It("should fail if hibernation is set to an invalid value", func() {
-		cluster := &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Annotations: map[string]string{
-					utils.HibernationAnnotationName: "",
-				},
-			},
-		}
-		Expect(cluster.validateHibernationAnnotation()).To(HaveLen(1))
-	})
-})
-
-var _ = Describe("validateManagedServices", func() {
-	var cluster *Cluster
-
-	BeforeEach(func() {
-		cluster = &Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "test",
-			},
-			Spec: ClusterSpec{
-				Managed: &ManagedConfiguration{
-					Services: &ManagedServices{
-						Additional: []ManagedService{},
-					},
-				},
-			},
-		}
-	})
-
-	Context("when Managed or Services is nil", func() {
-		It("should return no errors", func() {
-			cluster.Spec.Managed = nil
-			Expect(cluster.validateManagedServices()).To(BeNil())
-
-			cluster.Spec.Managed = &ManagedConfiguration{}
-			cluster.Spec.Managed.Services = nil
-			Expect(cluster.validateManagedServices()).To(BeNil())
-		})
-	})
-
-	Context("when there are no duplicate names", func() {
-		It("should return no errors", func() {
-			cluster.Spec.Managed.Services.Additional = []ManagedService{
-				{
-					ServiceTemplate: ServiceTemplateSpec{
-						ObjectMeta: Metadata{Name: "service1"},
-					},
-				},
-				{
-					ServiceTemplate: ServiceTemplateSpec{
-						ObjectMeta: Metadata{Name: "service2"},
-					},
-				},
-			}
-			Expect(cluster.validateManagedServices()).To(BeNil())
-		})
-	})
-
-	Context("when there are duplicate names", func() {
-		It("should return an error", func() {
-			cluster.Spec.Managed.Services.Additional = []ManagedService{
-				{
-					ServiceTemplate: ServiceTemplateSpec{
-						ObjectMeta: Metadata{Name: "service1"},
-					},
-				},
-				{
-					ServiceTemplate: ServiceTemplateSpec{
-						ObjectMeta: Metadata{Name: "service1"},
-					},
-				},
-			}
-			errs := cluster.validateManagedServices()
-			Expect(errs).To(HaveLen(1))
-			Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
-			Expect(errs[0].Field).To(Equal("spec.managed.services.additional"))
-			Expect(errs[0].Detail).To(ContainSubstring("contains services with the same .metadata.name"))
-		})
-	})
-
-	Context("when service template validation fails", func() {
-		It("should return an error", func() {
-			cluster.Spec.Managed.Services.Additional = []ManagedService{
-				{
-					ServiceTemplate: ServiceTemplateSpec{
-						ObjectMeta: Metadata{Name: ""},
-					},
-				},
-			}
-			errs := cluster.validateManagedServices()
-			Expect(errs).To(HaveLen(1))
-			Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
-			Expect(errs[0].Field).To(Equal("spec.managed.services.additional[0]"))
-		})
-
-		It("should not allow reserved service names", func() {
-			assertError := func(name string, index int, err *field.Error) {
-				expectedDetail := fmt.Sprintf("the service name: '%s' is reserved for operator use", name)
-				Expect(err.Type).To(Equal(field.ErrorTypeInvalid))
-				Expect(err.Field).To(Equal(fmt.Sprintf("spec.managed.services.additional[%d]", index)))
-				Expect(err.Detail).To(Equal(expectedDetail))
-			}
-			cluster.Spec.Managed.Services.Additional = []ManagedService{
-				{ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadWriteName()}}},
-				{ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadName()}}},
-				{ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadOnlyName()}}},
-				{ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceAnyName()}}},
-			}
-			errs := cluster.validateManagedServices()
-			Expect(errs).To(HaveLen(4))
-			assertError("test-rw", 0, errs[0])
-			assertError("test-r", 1, errs[1])
-			assertError("test-ro", 2, errs[2])
-			assertError("test-any", 3, errs[3])
-		})
-	})
-
-	Context("disabledDefault service validation", func() {
-		It("should allow the disablement of ro and r service", func() {
-			cluster.Spec.Managed.Services.DisabledDefaultServices = []ServiceSelectorType{
-				ServiceSelectorTypeR,
-				ServiceSelectorTypeRO,
-			}
-			errs := cluster.validateManagedServices()
-			Expect(errs).To(BeEmpty())
-		})
-
-		It("should not allow the disablement of rw service", func() {
-			cluster.Spec.Managed.Services.DisabledDefaultServices = []ServiceSelectorType{
-				ServiceSelectorTypeRW,
-			}
-			errs := cluster.validateManagedServices()
-			Expect(errs).To(HaveLen(1))
-			Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
-			Expect(errs[0].Field).To(Equal("spec.managed.services.disabledDefaultServices"))
-		})
-	})
-})
-
-var _ = Describe("ServiceTemplate Validation", func() {
-	var (
-		path         *field.Path
-		serviceSpecs ServiceTemplateSpec
-	)
-
-	BeforeEach(func() {
-		path = field.NewPath("spec")
-	})
-
-	Describe("validateServiceTemplate", func() {
-		Context("when name is required", func() {
-			It("should return an error if the name is empty", func() {
-				serviceSpecs = ServiceTemplateSpec{
-					ObjectMeta: Metadata{Name: ""},
-				}
-
-				errs := validateServiceTemplate(path, true, serviceSpecs)
-				Expect(errs).To(HaveLen(1))
-				Expect(errs[0].Error()).To(ContainSubstring("name is required"))
-			})
-
-			It("should not return an error if the name is present", func() {
-				serviceSpecs = ServiceTemplateSpec{
-					ObjectMeta: Metadata{Name: "valid-name"},
-				}
-
-				errs := validateServiceTemplate(path, true, serviceSpecs)
-				Expect(errs).To(BeEmpty())
-			})
-		})
-
-		Context("when name is not allowed", func() {
-			It("should return an error if the name is present", func() {
-				serviceSpecs = ServiceTemplateSpec{
-					ObjectMeta: Metadata{Name: "invalid-name"},
-				}
-
-				errs := validateServiceTemplate(path, false, serviceSpecs)
-				Expect(errs).To(HaveLen(1))
-				Expect(errs[0].Error()).To(ContainSubstring("name is not allowed"))
-			})
-
-			It("should not return an error if the name is empty", func() {
-				serviceSpecs = ServiceTemplateSpec{
-					ObjectMeta: Metadata{Name: ""},
-				}
-
-				errs := validateServiceTemplate(path, false, serviceSpecs)
-				Expect(errs).To(BeEmpty())
-			})
-		})
-
-		Context("when selector is present", func() {
-			It("should return an error if the selector is present", func() {
-				serviceSpecs = ServiceTemplateSpec{
-					ObjectMeta: Metadata{Name: "valid-name"},
-					Spec: corev1.ServiceSpec{
-						Selector: map[string]string{"app": "test"},
-					},
-				}
-
-				errs := validateServiceTemplate(path, true, serviceSpecs)
-				Expect(errs).To(HaveLen(1))
-				Expect(errs[0].Error()).To(ContainSubstring("selector field is managed by the operator"))
-			})
-
-			It("should not return an error if the selector is absent", func() {
-				serviceSpecs = ServiceTemplateSpec{
-					ObjectMeta: Metadata{Name: "valid-name"},
-					Spec: corev1.ServiceSpec{
-						Selector: map[string]string{},
-					},
-				}
-
-				errs := validateServiceTemplate(path, true, serviceSpecs)
-				Expect(errs).To(BeEmpty())
-			})
-		})
-	})
-})
-
-var _ = Describe("setDefaultPlugins", func() {
-	It("adds pre-defined plugins if not already present", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Plugins: []PluginConfiguration{
-					{Name: "existing-plugin", Enabled: ptr.To(true)},
-				},
-			},
-		}
-		config := &configuration.Data{
-			IncludePlugins: "predefined-plugin1,predefined-plugin2",
-		}
-
-		cluster.setDefaultPlugins(config)
-
-		Expect(cluster.Spec.Plugins).To(
-			ContainElement(PluginConfiguration{Name: "existing-plugin", Enabled: ptr.To(true)}))
-		Expect(cluster.Spec.Plugins).To(
-			ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)}))
-		Expect(cluster.Spec.Plugins).To(
-			ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)}))
-	})
-
-	It("does not add pre-defined plugins if already present", func() {
-		cluster := &Cluster{
-			Spec: ClusterSpec{
-				Plugins: []PluginConfiguration{
-					{Name: "predefined-plugin1", Enabled: ptr.To(false)},
-				},
-			},
-		}
-		config := &configuration.Data{
-			IncludePlugins: "predefined-plugin1,predefined-plugin2",
-		}
-
-		cluster.setDefaultPlugins(config)
-
-		Expect(cluster.Spec.Plugins).To(HaveLen(2))
-		Expect(cluster.Spec.Plugins).To(
-			ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(false)}))
-		Expect(cluster.Spec.Plugins).To(
-			ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)}))
-	})
-
-	It("handles empty plugin list gracefully", func() {
-		cluster := &Cluster{}
-		config := &configuration.Data{
-			IncludePlugins: "predefined-plugin1",
-		}
-
-		cluster.setDefaultPlugins(config)
-
-		Expect(cluster.Spec.Plugins).To(HaveLen(1))
-		Expect(cluster.Spec.Plugins).To(
-			ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)}))
-	})
-})
diff --git a/api/v1/clusterimagecatalog_funcs.go b/api/v1/clusterimagecatalog_funcs.go
new file mode 100644
index 0000000000..1c28740e31
--- /dev/null
+++ b/api/v1/clusterimagecatalog_funcs.go
@@ -0,0 +1,25 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package v1
+
+// GetSpec returns the Spec of the ClusterImageCatalog
+func (c *ClusterImageCatalog) GetSpec() *ImageCatalogSpec {
+	return &c.Spec
+}
diff --git a/api/v1/clusterimagecatalog_types.go b/api/v1/clusterimagecatalog_types.go
index 4915a615a1..4bf405b629 100644
--- a/api/v1/clusterimagecatalog_types.go
+++ b/api/v1/clusterimagecatalog_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 @@ -19,6 +22,7 @@ package v1 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient +// +genclient:nonNamespaced // +kubebuilder:object:root=true // +kubebuilder:resource:scope=Cluster // +kubebuilder:storageversion @@ -45,16 +49,6 @@ type ClusterImageCatalogList struct { Items []ClusterImageCatalog `json:"items"` } -// GetObjectMeta returns the ObjectMeta of the ClusterImageCatalog -func (c *ClusterImageCatalog) GetObjectMeta() *metav1.ObjectMeta { - return &c.ObjectMeta -} - -// GetSpec returns the Spec of the ClusterImageCatalog -func (c *ClusterImageCatalog) GetSpec() *ImageCatalogSpec { - return &c.Spec -} - func init() { SchemeBuilder.Register(&ClusterImageCatalog{}, &ClusterImageCatalogList{}) } diff --git a/api/v1/common_types.go b/api/v1/common_types.go index fb5144ae5b..cf15b953b1 100644 --- a/api/v1/common_types.go +++ b/api/v1/common_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 @@ -25,6 +28,7 @@ const VolumeSnapshotKind = "VolumeSnapshot" // not using the core data types. type Metadata struct { // The name of the resource. Only supported for certain types + // +optional Name string `json:"name,omitempty"` // Map of string keys and values that can be used to organize and categorize diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go new file mode 100644 index 0000000000..4cea22a0dc --- /dev/null +++ b/api/v1/database_funcs.go @@ -0,0 +1,90 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" +) + +// SetAsFailed sets the database as failed with the given error +func (db *Database) SetAsFailed(err error) { + db.Status.Applied = ptr.To(false) + db.Status.Message = err.Error() +} + +// SetAsUnknown sets the database as unknown with the given error +func (db *Database) SetAsUnknown(err error) { + db.Status.Applied = nil + db.Status.Message = err.Error() +} + +// SetAsReady sets the database as working correctly +func (db *Database) SetAsReady() { + db.Status.Applied = ptr.To(true) + db.Status.Message = "" + db.Status.ObservedGeneration = db.Generation +} + +// GetStatusMessage returns the status message of the database +func (db *Database) GetStatusMessage() string { + return db.Status.Message +} + +// GetClusterRef returns the cluster reference of the database +func (db *Database) GetClusterRef() corev1.LocalObjectReference { + return db.Spec.ClusterRef +} + +// GetManagedObjectName returns the name of the managed database object +func (db *Database) GetManagedObjectName() string { + return db.Spec.Name +} + +// GetName returns the database object name +func (db *Database) GetName() string { + return db.Name +} + +// HasReconciliations returns true if the database object has been reconciled at least once +func (db *Database) HasReconciliations() bool { + return db.Status.ObservedGeneration > 0 +} + +// SetStatusObservedGeneration sets the observed generation of the database +func (db *Database) SetStatusObservedGeneration(obsGeneration int64) { + db.Status.ObservedGeneration = obsGeneration +} + +// MustHaveManagedResourceExclusivity detects conflicting databases +func (dbList *DatabaseList) MustHaveManagedResourceExclusivity(reference *Database) error { + pointers := toSliceWithPointers(dbList.Items) + return ensureManagedResourceExclusivity(reference, pointers) +} + +// GetEnsure gets the ensure status of the resource +func (dbObject DatabaseObjectSpec) GetEnsure() EnsureOption { + return dbObject.Ensure +} + +// GetName gets the name of the resource +func (dbObject DatabaseObjectSpec) GetName() string { + return dbObject.Name +} diff --git a/api/v1/database_types.go b/api/v1/database_types.go index f185a5a635..9baee51cdd 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 @@ -35,45 +38,258 @@ const ( DatabaseReclaimRetain DatabaseReclaimPolicy = "retain" ) -// DatabaseSpec is the specification of a Postgresql Database +// DatabaseSpec is the specification of a Postgresql Database, built around the +// `CREATE DATABASE`, `ALTER DATABASE`, and `DROP DATABASE` SQL commands of +// PostgreSQL. 
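The Set* helpers in database_funcs.go above encode a tri-state reconciliation outcome in Status.Applied (*bool): nil while unknown, false on failure, true once applied, and ObservedGeneration only advances on success. A hypothetical reconciler tail using them; applyDatabaseChanges is an invented placeholder, not a real call in this codebase:

// Hypothetical usage of the tri-state helpers; applyDatabaseChanges is a placeholder.
if err := applyDatabaseChanges(ctx, db); err != nil {
	db.SetAsFailed(err) // Applied=false, Message=err.Error(), generation untouched
} else {
	db.SetAsReady() // Applied=true, Message cleared, ObservedGeneration advanced
}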
+// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`" +// +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`" +// +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`" type DatabaseSpec struct { - // The corresponding cluster + // The name of the PostgreSQL cluster hosting the database. ClusterRef corev1.LocalObjectReference `json:"cluster"` - // The name inside PostgreSQL + // Ensure the PostgreSQL database is `present` or `absent` - defaults to "present". + // +kubebuilder:default:="present" + // +kubebuilder:validation:Enum=present;absent + // +optional + Ensure EnsureOption `json:"ensure,omitempty"` + + // The name of the database to create inside PostgreSQL. This setting cannot be changed. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" + // +kubebuilder:validation:XValidation:rule="self != 'postgres'",message="the name postgres is reserved" + // +kubebuilder:validation:XValidation:rule="self != 'template0'",message="the name template0 is reserved" + // +kubebuilder:validation:XValidation:rule="self != 'template1'",message="the name template1 is reserved" Name string `json:"name"` - // The owner + // Maps to the `OWNER` parameter of `CREATE DATABASE`. + // Maps to the `OWNER TO` command of `ALTER DATABASE`. + // The role name of the user who owns the database inside PostgreSQL. Owner string `json:"owner"` - // The encoding (cannot be changed) + // Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + // cannot be changed. The name of the template from which to create + // this database. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="template is immutable" + Template string `json:"template,omitempty"` + + // Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + // cannot be changed. Character set encoding to use in the database. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="encoding is immutable" // +optional Encoding string `json:"encoding,omitempty"` - // True when the database is a template + // Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + // cannot be changed. Sets the default collation order and character + // classification in the new database. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale is immutable" + // +optional + Locale string `json:"locale,omitempty"` + + // Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + // setting cannot be changed. This option sets the locale provider for + // databases created in the new cluster. Available from PostgreSQL 16. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeProvider is immutable" + // +optional + LocaleProvider string `json:"localeProvider,omitempty"` + + // Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + // setting cannot be changed. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCollate is immutable" + // +optional + LcCollate string `json:"localeCollate,omitempty"` + + // Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + // cannot be changed. 
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCType is immutable" + // +optional + LcCtype string `json:"localeCType,omitempty"` + + // Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + // setting cannot be changed. Specifies the ICU locale when the ICU + // provider is used. This option requires `localeProvider` to be set to + // `icu`. Available from PostgreSQL 15. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuLocale is immutable" + // +optional + IcuLocale string `json:"icuLocale,omitempty"` + + // Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + // cannot be changed. Specifies additional collation rules to customize + // the behavior of the default collation. This option requires + // `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuRules is immutable" + // +optional + IcuRules string `json:"icuRules,omitempty"` + + // Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + // setting cannot be changed. Specifies the locale name when the + // builtin provider is used. This option requires `localeProvider` to + // be set to `builtin`. Available from PostgreSQL 17. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtinLocale is immutable" + // +optional + BuiltinLocale string `json:"builtinLocale,omitempty"` + + // Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + // setting cannot be changed. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collationVersion is immutable" + // +optional + CollationVersion string `json:"collationVersion,omitempty"` + + // Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + // DATABASE`. If true, this database is considered a template and can + // be cloned by any user with `CREATEDB` privileges. // +optional IsTemplate *bool `json:"isTemplate,omitempty"` - // True when connections to this database are allowed + // Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + // `ALTER DATABASE`. If false then no one can connect to this database. // +optional AllowConnections *bool `json:"allowConnections,omitempty"` - // Connection limit, -1 means no limit and -2 means the - // database is not valid + // Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + // `ALTER DATABASE`. How many concurrent connections can be made to + // this database. -1 (the default) means no limit. // +optional ConnectionLimit *int `json:"connectionLimit,omitempty"` - // The default tablespace of this database + // Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + // Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + // The name of the tablespace (in PostgreSQL) that will be associated + // with the new database. This tablespace will be the default + // tablespace used for objects created in this database. // +optional Tablespace string `json:"tablespace,omitempty"` - // The policy for end-of-life maintenance of this database + // The policy for end-of-life maintenance of this database. 
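The three CEL rules at the top of DatabaseSpec gate the provider-specific fields. The same checks in plain Go, as a self-contained sketch (approximating CEL's has() with a non-empty test, which these optional omitempty string fields allow):

package main

import (
	"errors"
	"fmt"
)

// localeFields mirrors the provider-related fields of DatabaseSpec.
type localeFields struct {
	LocaleProvider string
	BuiltinLocale  string
	IcuLocale      string
	IcuRules       string
}

// validateLocaleFields restates the XValidation rules on DatabaseSpec.
func validateLocaleFields(f localeFields) error {
	if f.BuiltinLocale != "" && f.LocaleProvider != "builtin" {
		return errors.New("builtinLocale is only available when localeProvider is set to `builtin`")
	}
	if f.IcuLocale != "" && f.LocaleProvider != "icu" {
		return errors.New("icuLocale is only available when localeProvider is set to `icu`")
	}
	if f.IcuRules != "" && f.LocaleProvider != "icu" {
		return errors.New("icuRules is only available when localeProvider is set to `icu`")
	}
	return nil
}

func main() {
	fmt.Println(validateLocaleFields(localeFields{LocaleProvider: "icu", IcuLocale: "en-US"})) // <nil>
	fmt.Println(validateLocaleFields(localeFields{BuiltinLocale: "C"}))                        // error
}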
// +kubebuilder:validation:Enum=delete;retain + // +kubebuilder:default:=retain + // +optional + ReclaimPolicy DatabaseReclaimPolicy `json:"databaseReclaimPolicy,omitempty"` + + // The list of schemas to be managed in the database + // +optional + Schemas []SchemaSpec `json:"schemas,omitempty"` + + // The list of extensions to be managed in the database + // +optional + Extensions []ExtensionSpec `json:"extensions,omitempty"` + + // The list of foreign data wrappers to be managed in the database + // +optional + FDWs []FDWSpec `json:"fdws,omitempty"` +} + +// DatabaseObjectSpec contains the fields which are common to every +// database object +type DatabaseObjectSpec struct { + // Name of the extension/schema + Name string `json:"name"` + + // Specifies whether an extension/schema should be present or absent in + // the database. If set to `present`, the extension/schema will be + // created if it does not exist. If set to `absent`, the + // extension/schema will be removed if it exists. + // +kubebuilder:default:="present" + // +kubebuilder:validation:Enum=present;absent + // +optional + Ensure EnsureOption `json:"ensure"` +} + +// SchemaSpec configures a schema in a database +type SchemaSpec struct { + // Common fields + DatabaseObjectSpec `json:",inline"` + + // The role name of the user who owns the schema inside PostgreSQL. + // It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + // `OWNER TO` command of `ALTER SCHEMA`. + Owner string `json:"owner,omitempty"` +} + +// ExtensionSpec configures an extension in a database +type ExtensionSpec struct { + // Common fields + DatabaseObjectSpec `json:",inline"` + + // The version of the extension to install. If empty, the operator will + // install the default version (whatever is specified in the + // extension's control file) + Version string `json:"version,omitempty"` + + // The name of the schema in which to install the extension's objects, + // in case the extension allows its contents to be relocated. If not + // specified (default), and the extension's control file does not + // specify a schema either, the current default object creation schema + // is used. + Schema string `json:"schema,omitempty"` +} + +// FDWSpec configures a Foreign Data Wrapper in a database +type FDWSpec struct { + // Common fields + DatabaseObjectSpec `json:",inline"` + + // Name of the handler function (e.g., "postgres_fdw_handler"). + // This will be empty if no handler is specified. In that case, + // the default handler is registered when the FDW extension is created. + // +optional + Handler string `json:"handler,omitempty"` + + // Name of the validator function (e.g., "postgres_fdw_validator"). + // This will be empty if no validator is specified. In that case, + // the default validator is registered when the FDW extension is created. + // +optional + Validator string `json:"validator,omitempty"` + + // Owner specifies the database role that will own the Foreign Data Wrapper. + // The role must have superuser privileges in the target database. + // +optional + Owner string `json:"owner,omitempty"` + + // Options specifies the configuration options for the FDW + // (key is the option name, value is the option value). + // +optional + Options []OptionSpec `json:"options,omitempty"` + + // List of roles for which `USAGE` privileges on the FDW are granted or revoked.
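Putting the new declarative fields together, a hypothetical Database object (illustrative names; assumes this api/v1 package and its corev1/metav1 imports):

db := Database{
	ObjectMeta: metav1.ObjectMeta{Name: "app-db", Namespace: "default"},
	Spec: DatabaseSpec{
		ClusterRef: corev1.LocalObjectReference{Name: "cluster-example"},
		Name:       "app",
		Owner:      "app",
		Schemas: []SchemaSpec{{
			DatabaseObjectSpec: DatabaseObjectSpec{Name: "reporting", Ensure: "present"},
			Owner:              "app",
		}},
		Extensions: []ExtensionSpec{{
			DatabaseObjectSpec: DatabaseObjectSpec{Name: "postgres_fdw"},
		}},
		FDWs: []FDWSpec{{
			DatabaseObjectSpec: DatabaseObjectSpec{Name: "postgres_fdw"},
			Usages:             []UsageSpec{{Name: "app", Type: "grant"}},
		}},
	},
}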
+ // +optional + Usages []UsageSpec `json:"usage,omitempty"` +} + +// OptionSpec holds the name, value and the ensure field for an option +type OptionSpec struct { + // Name of the option + Name string `json:"name"` + + // Value and ensure field of the option + OptionSpecValue `json:",inline"` +} + +// OptionSpecValue holds the value and the ensure field for an option +type OptionSpecValue struct { + // Value of the option + Value string `json:"value"` + + // Specifies whether an option should be present or absent in + // the database. If set to `present`, the option will be + // created if it does not exist. If set to `absent`, the + // option will be removed if it exists. + // +kubebuilder:default:="present" + // +kubebuilder:validation:Enum=present;absent + // +optional + Ensure EnsureOption `json:"ensure,omitempty"` +} + +// UsageSpec configures a usage for a foreign data wrapper +type UsageSpec struct { + // Name of the usage + Name string `json:"name"` + + // The type of usage + // +kubebuilder:default:="grant" + // +kubebuilder:validation:Enum=grant;revoke + // +optional + Type string `json:"type,omitempty"` } // DatabaseStatus defines the observed state of Database @@ -83,20 +299,50 @@ type DatabaseStatus struct { // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` - // Ready is true if the database was reconciled correctly - Ready bool `json:"ready,omitempty"` + // Applied is true if the database was reconciled correctly + // +optional + Applied *bool `json:"applied,omitempty"` + + // Message is the reconciliation output message + // +optional + Message string `json:"message,omitempty"` + + // Schemas is the status of the managed schemas + // +optional + Schemas []DatabaseObjectStatus `json:"schemas,omitempty"` - // Error is the reconciliation error message - Error string `json:"error,omitempty"` + // Extensions is the status of the managed extensions + // +optional + Extensions []DatabaseObjectStatus `json:"extensions,omitempty"` + + // FDWs is the status of the managed FDWs + // +optional + FDWs []DatabaseObjectStatus `json:"fdws,omitempty"` +} + +// DatabaseObjectStatus is the status of the managed database objects +type DatabaseObjectStatus struct { + // The name of the object + Name string `json:"name"` + + // True if the object has been installed successfully in + // the database + Applied bool `json:"applied"` + + // Message is the object reconciliation message + // +optional + Message string `json:"message,omitempty"` } +// +genclient // +kubebuilder:object:root=true +// +kubebuilder:storageversion // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name" // +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name" -// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready" -// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error",description="Latest error message" +// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message" // Database is the Schema for the databases API type Database struct { diff --git a/api/v1/doc.go b/api/v1/doc.go index 856c513680..73b01b1476 100644 --- a/api/v1/doc.go +++ b/api/v1/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © 
contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package v1 contains API Schema definitions for the postgresql v1 API group diff --git a/api/v1/failoverquorum_types.go b/api/v1/failoverquorum_types.go new file mode 100644 index 0000000000..afa55cd001 --- /dev/null +++ b/api/v1/failoverquorum_types.go @@ -0,0 +1,83 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true + +// FailoverQuorumList contains a list of FailoverQuorum +type FailoverQuorumList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of failoverquorums + Items []FailoverQuorum `json:"items"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// FailoverQuorum contains the information about the current failover +// quorum status of a PG cluster. It is updated by the instance manager +// of the primary node and reset to zero by the operator to trigger +// an update. +type FailoverQuorum struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // Most recently observed status of the failover quorum. + // +optional + Status FailoverQuorumStatus `json:"status"` +} + +// FailoverQuorumStatus is the latest observed status of the failover +// quorum of the PG cluster. +type FailoverQuorumStatus struct { + // Contains the latest reported Method value. + // +optional + Method string `json:"method,omitempty"` + + // StandbyNames is the list of potentially synchronous + // instance names. + // +optional + StandbyNames []string `json:"standbyNames,omitempty"` + + // StandbyNumber is the number of synchronous standbys that transactions + // need to wait for replies from. + // +optional + StandbyNumber int `json:"standbyNumber,omitempty"` + + // Primary is the name of the primary instance that most recently + // updated this object.
+ // +optional + Primary string `json:"primary,omitempty"` +} + +func init() { + SchemeBuilder.Register(&FailoverQuorum{}, &FailoverQuorumList{}) +} diff --git a/api/v1/generic_funcs.go b/api/v1/generic_funcs.go new file mode 100644 index 0000000000..7397a6f4e2 --- /dev/null +++ b/api/v1/generic_funcs.go @@ -0,0 +1,67 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +type managedResourceComparer interface { + GetName() string + GetManagedObjectName() string + GetClusterRef() corev1.LocalObjectReference + HasReconciliations() bool +} + +func ensureManagedResourceExclusivity[T managedResourceComparer](t1 T, list []T) error { + for _, t2 := range list { + if t1.GetName() == t2.GetName() { + continue + } + + if t1.GetClusterRef().Name != t2.GetClusterRef().Name { + continue + } + + if !t2.HasReconciliations() { + continue + } + + if t1.GetManagedObjectName() == t2.GetManagedObjectName() { + return fmt.Errorf( + "%q is already managed by object %q", + t1.GetManagedObjectName(), t2.GetName(), + ) + } + } + + return nil +} + +// toSliceWithPointers converts a slice of items to a slice of pointers to the items +func toSliceWithPointers[T any](items []T) []*T { + result := make([]*T, len(items)) + for i := range items { + result[i] = &items[i] + } + return result +} diff --git a/api/v1/generic_funcs_test.go b/api/v1/generic_funcs_test.go new file mode 100644 index 0000000000..51429fd030 --- /dev/null +++ b/api/v1/generic_funcs_test.go @@ -0,0 +1,38 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +type testStruct struct{ Val int } + +var _ = Describe("toSliceWithPointers", func() { + It("should return pointers to the original slice elements", func() { + items := []testStruct{{1}, {2}, {3}} + pointers := toSliceWithPointers(items) + Expect(pointers).To(HaveLen(len(items))) + for i := range items { + Expect(pointers[i]).To(BeIdenticalTo(&items[i])) + } + }) +}) diff --git a/tests/utils/lease.go b/api/v1/genericimagecatalog_iface.go similarity index 52% rename from tests/utils/lease.go rename to api/v1/genericimagecatalog_iface.go index a74de844be..2b03be377f 100644 --- a/tests/utils/lease.go +++ b/api/v1/genericimagecatalog_iface.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,22 +13,24 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller" + "k8s.io/apimachinery/pkg/runtime" ) -// GetLeaderInfoFromLease gathers leader holderIdentity from the lease -func GetLeaderInfoFromLease(operatorNamespace string, env *TestingEnvironment) (string, error) { - leaseInterface := env.Interface.CoordinationV1().Leases(operatorNamespace) - lease, err := leaseInterface.Get(env.Ctx, controller.LeaderElectionID, metav1.GetOptions{}) - if err != nil { - return "", err - } - return *lease.Spec.HolderIdentity, nil +// +kubebuilder:object:generate=false + +// GenericImageCatalog is an interface used to manage ClusterImageCatalog and ImageCatalog in the same way +type GenericImageCatalog interface { + runtime.Object + metav1.Object + + // GetSpec returns the Spec of the GenericImageCatalog + GetSpec() *ImageCatalogSpec } diff --git a/api/v1/genericimagecatalog_types.go b/api/v1/genericimagecatalog_types.go deleted file mode 100644 index 11929b35bf..0000000000 --- a/api/v1/genericimagecatalog_types.go +++ /dev/null @@ -1,19 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// +kubebuilder:object:generate=false - -// GenericImageCatalog is an interface used to manage ClusterImageCatalog and ImageCatalog in the same way -type GenericImageCatalog interface { - runtime.Object - metav1.Object - - // GetObjectMeta returns the ObjectMeta of the GenericImageCatalog - GetObjectMeta() *metav1.ObjectMeta - // GetSpec returns the Spec of the GenericImageCatalog - GetSpec() *ImageCatalogSpec -} diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go index 44bab3db5b..02a19be715 100644 --- a/api/v1/groupversion_info.go +++ b/api/v1/groupversion_info.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,11 +13,10 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -// Package v1 contains API Schema definitions for the postgresql v1 API group -// +kubebuilder:object:generate=true -// +groupName=postgresql.cnpg.io package v1 import ( @@ -24,24 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/scheme" ) -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"} - - // ClusterGVK is the triple to reach Cluster resources in k8s - ClusterGVK = schema.GroupVersionResource{ - Group: GroupVersion.Group, - Version: GroupVersion.Version, - Resource: "clusters", - } - - // PoolerGVK is the triple to reach Pooler resources in k8s - PoolerGVK = schema.GroupVersionResource{ - Group: GroupVersion.Group, - Version: GroupVersion.Version, - Resource: "poolers", - } - +const ( // ClusterKind is the kind name of Clusters ClusterKind = "Cluster" @@ -57,8 +40,22 @@ var ( // ClusterImageCatalogKind is the kind name of the cluster-wide image catalogs ClusterImageCatalogKind = "ClusterImageCatalog" + // PublicationKind is the kind name of publications + PublicationKind = "Publication" + + // SubscriptionKind is the kind name of subscriptions + SubscriptionKind = "Subscription" + + // DatabaseKind is the kind name of databases + DatabaseKind = "Database" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"} + // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme diff --git a/api/v1/imagecatalog_funcs.go b/api/v1/imagecatalog_funcs.go index 22f3a80c90..b3a2b69da4 100644 --- a/api/v1/imagecatalog_funcs.go +++ b/api/v1/imagecatalog_funcs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +13,17 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 +// GetSpec returns the Spec of the ImageCatalog +func (c *ImageCatalog) GetSpec() *ImageCatalogSpec { + return &c.Spec +} + // FindImageForMajor finds the correct image for the selected major version func (spec *ImageCatalogSpec) FindImageForMajor(major int) (string, bool) { for _, entry := range spec.Images { diff --git a/api/v1/imagecatalog_funcs_test.go b/api/v1/imagecatalog_funcs_test.go index 0fcb3d1bd4..e424398070 100644 --- a/api/v1/imagecatalog_funcs_test.go +++ b/api/v1/imagecatalog_funcs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
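The slimmed-down GenericImageCatalog interface keeps a single code path for the namespaced ImageCatalog and the cluster-wide ClusterImageCatalog; with GetObjectMeta removed, metav1.Object already provides the metadata accessors. A sketch of a caller, assuming this api/v1 package together with the GetSpec and FindImageForMajor helpers shown above:

// imageForMajor works for ImageCatalog and ClusterImageCatalog alike.
func imageForMajor(catalog GenericImageCatalog, major int) (string, bool) {
	return catalog.GetSpec().FindImageForMajor(major)
}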
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/imagecatalog_types.go b/api/v1/imagecatalog_types.go index c8c386ff22..76938faec6 100644 --- a/api/v1/imagecatalog_types.go +++ b/api/v1/imagecatalog_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 @@ -64,16 +67,6 @@ type ImageCatalogList struct { Items []ImageCatalog `json:"items"` } -// GetObjectMeta returns the ObjectMeta of the ImageCatalog -func (c *ImageCatalog) GetObjectMeta() *metav1.ObjectMeta { - return &c.ObjectMeta -} - -// GetSpec returns the Spec of the ImageCatalog -func (c *ImageCatalog) GetSpec() *ImageCatalogSpec { - return &c.Spec -} - func init() { SchemeBuilder.Register(&ImageCatalog{}, &ImageCatalogList{}) } diff --git a/api/v1/pooler_funcs.go b/api/v1/pooler_funcs.go new file mode 100644 index 0000000000..f1ca476e1a --- /dev/null +++ b/api/v1/pooler_funcs.go @@ -0,0 +1,75 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import corev1 "k8s.io/api/core/v1" + +// IsPaused returns whether all databases should be paused or not. +func (in PgBouncerSpec) IsPaused() bool { + return in.Paused != nil && *in.Paused +} + +// GetAuthQuerySecretName returns the specified AuthQuerySecret name for PgBouncer +// if provided or the default name otherwise. +func (in *Pooler) GetAuthQuerySecretName() string { + if in.Spec.PgBouncer != nil && in.Spec.PgBouncer.AuthQuerySecret != nil { + return in.Spec.PgBouncer.AuthQuerySecret.Name + } + + return in.Spec.Cluster.Name + DefaultPgBouncerPoolerSecretSuffix +} + +// GetAuthQuery returns the specified AuthQuery name for PgBouncer +// if provided or the default name otherwise. +func (in *Pooler) GetAuthQuery() string { + if in.Spec.PgBouncer.AuthQuery != "" { + return in.Spec.PgBouncer.AuthQuery + } + + return DefaultPgBouncerPoolerAuthQuery +} + +// IsAutomatedIntegration returns whether the Pooler integration with the +// Cluster is automated or not.
+func (in *Pooler) IsAutomatedIntegration() bool { + if in.Spec.PgBouncer == nil { + return true + } + // If the user specified an AuthQuerySecret or an AuthQuery, the integration + // is not going to be handled by the operator. + if (in.Spec.PgBouncer.AuthQuerySecret != nil && in.Spec.PgBouncer.AuthQuerySecret.Name != "") || + in.Spec.PgBouncer.AuthQuery != "" { + return false + } + return true +} + +// GetResourcesRequirements returns the resource requirements for the Pooler +func (in *Pooler) GetResourcesRequirements() corev1.ResourceRequirements { + if in.Spec.Template == nil { + return corev1.ResourceRequirements{} + } + + if in.Spec.Template.Spec.Resources == nil { + return corev1.ResourceRequirements{} + } + + return *in.Spec.Template.Spec.Resources +} diff --git a/api/v1/pooler_types_test.go b/api/v1/pooler_funcs_test.go similarity index 87% rename from api/v1/pooler_types_test.go rename to api/v1/pooler_funcs_test.go index 7a0383d703..6e452aa152 100644 --- a/api/v1/pooler_types_test.go +++ b/api/v1/pooler_funcs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/pooler_types.go b/api/v1/pooler_types.go index e366bf31a6..3dd668394b 100644 --- a/api/v1/pooler_types.go +++ b/api/v1/pooler_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 @@ -25,7 +28,7 @@ import ( // PoolerType is the type of the connection pool, meaning the service // we are targeting. Allowed values are `rw` and `ro`. -// +kubebuilder:validation:Enum=rw;ro +// +kubebuilder:validation:Enum=rw;ro;r type PoolerType string const ( @@ -35,6 +38,9 @@ const ( // PoolerTypeRO means that the pooler involves only the replicas PoolerTypeRO = PoolerType("ro") + // PoolerTypeR means that the pooler involves every instance + PoolerTypeR = PoolerType("r") + // DefaultPgBouncerPoolerAuthQuery is the default auth_query for PgBouncer DefaultPgBouncerPoolerAuthQuery = "SELECT usename, passwd FROM public.user_search($1)" ) @@ -184,11 +190,6 @@ type PgBouncerSpec struct { Paused *bool `json:"paused,omitempty"` } -// IsPaused returns whether all database should be paused or not. 
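IsAutomatedIntegration above reduces two optional user inputs to one decision; a self-contained restatement of the same truth table (illustrative only):

package main

import "fmt"

// automated mirrors Pooler.IsAutomatedIntegration: the operator wires up the
// integration unless the user supplied a custom auth query secret name or a
// custom auth query (a missing pgbouncer section counts as automated).
func automated(authQuerySecretName, authQuery string) bool {
	return authQuerySecretName == "" && authQuery == ""
}

func main() {
	fmt.Println(automated("", ""))          // true: operator-managed
	fmt.Println(automated("my-secret", "")) // false: manual setup required
	fmt.Println(automated("", "SELECT usename, passwd FROM my_lookup($1)")) // false
}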
-func (in PgBouncerSpec) IsPaused() bool { - return in.Paused != nil && *in.Paused -} - // PoolerStatus defines the observed state of Pooler type PoolerStatus struct { // The resource version of the config object @@ -265,6 +266,7 @@ type Pooler struct { // PoolerList contains a list of Pooler type PoolerList struct { metav1.TypeMeta `json:",inline"` + // +optional metav1.ListMeta `json:"metadata,omitempty"` Items []Pooler `json:"items"` } @@ -272,38 +274,3 @@ type PoolerList struct { func init() { SchemeBuilder.Register(&Pooler{}, &PoolerList{}) } - -// GetAuthQuerySecretName returns the specified AuthQuerySecret name for PgBouncer -// if provided or the default name otherwise. -func (in *Pooler) GetAuthQuerySecretName() string { - if in.Spec.PgBouncer != nil && in.Spec.PgBouncer.AuthQuerySecret != nil { - return in.Spec.PgBouncer.AuthQuerySecret.Name - } - - return in.Spec.Cluster.Name + DefaultPgBouncerPoolerSecretSuffix -} - -// GetAuthQuery returns the specified AuthQuery name for PgBouncer -// if provided or the default name otherwise. -func (in *Pooler) GetAuthQuery() string { - if in.Spec.PgBouncer.AuthQuery != "" { - return in.Spec.PgBouncer.AuthQuery - } - - return DefaultPgBouncerPoolerAuthQuery -} - -// IsAutomatedIntegration returns whether the Pooler integration with the -// Cluster is automated or not. -func (in *Pooler) IsAutomatedIntegration() bool { - if in.Spec.PgBouncer == nil { - return true - } - // If the user specified an AuthQuerySecret or an AuthQuery, the integration - // is not going to be handled by the operator. - if (in.Spec.PgBouncer.AuthQuerySecret != nil && in.Spec.PgBouncer.AuthQuerySecret.Name != "") || - in.Spec.PgBouncer.AuthQuery != "" { - return false - } - return true -} diff --git a/api/v1/pooler_webhook.go b/api/v1/pooler_webhook.go deleted file mode 100644 index c82205ed0c..0000000000 --- a/api/v1/pooler_webhook.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - "github.com/cloudnative-pg/machinery/pkg/log" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" -) - -var ( - // poolerLog is for logging in this package. 
- poolerLog = log.WithName("pooler-resource").WithValues("version", "v1") - - // AllowedPgbouncerGenericConfigurationParameters is the list of allowed parameters for PgBouncer - AllowedPgbouncerGenericConfigurationParameters = stringset.From([]string{ - "application_name_add_host", - "autodb_idle_timeout", - "client_idle_timeout", - "client_login_timeout", - "default_pool_size", - "disable_pqexec", - "idle_transaction_timeout", - "ignore_startup_parameters", - "log_connections", - "log_disconnections", - "log_pooler_errors", - "log_stats", - "max_client_conn", - "max_db_connections", - "max_prepared_statements", - "max_user_connections", - "min_pool_size", - "query_timeout", - "query_wait_timeout", - "reserve_pool_size", - "reserve_pool_timeout", - "server_check_delay", - "server_check_query", - "server_connect_timeout", - "server_fast_close", - "server_idle_timeout", - "server_lifetime", - "server_login_retry", - "server_reset_query", - "server_reset_query_always", - "server_round_robin", - "stats_period", - "tcp_keepalive", - "tcp_keepcnt", - "tcp_keepidle", - "tcp_keepintvl", - "tcp_user_timeout", - "verbose", - }) -) - -// SetupWebhookWithManager setup the webhook inside the controller manager -func (r *Pooler) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-pooler,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=poolers,versions=v1,name=vpooler.cnpg.io,sideEffects=None - -var _ webhook.Validator = &Pooler{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *Pooler) ValidateCreate() (warns admission.Warnings, err error) { - poolerLog.Info("validate create", "name", r.Name, "namespace", r.Namespace) - - if !r.IsAutomatedIntegration() { - poolerLog.Info("Pooler not automatically configured, manual configuration required", - "name", r.Name, "namespace", r.Namespace, "cluster", r.Spec.Cluster.Name) - warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+ - "Manually configure it as described in the docs.", r.Name, r.Spec.Cluster.Name, r.Namespace)) - } - - allErrs := r.Validate() - - if len(allErrs) == 0 { - return warns, nil - } - - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"}, - r.Name, allErrs) -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *Pooler) ValidateUpdate(old runtime.Object) (warns admission.Warnings, err error) { - poolerLog.Info("validate update", "name", r.Name, "namespace", r.Namespace) - - oldPooler := old.(*Pooler) - - if oldPooler.IsAutomatedIntegration() && !r.IsAutomatedIntegration() { - poolerLog.Info("Pooler not automatically configured, manual configuration required", - "name", r.Name, "namespace", r.Namespace, "cluster", r.Spec.Cluster.Name) - warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). 
"+ - "Manually configure it as described in the docs.", r.Name, r.Spec.Cluster.Name, r.Namespace)) - } - - allErrs := r.Validate() - if len(allErrs) == 0 { - return nil, nil - } - - return warns, apierrors.NewInvalid( - schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"}, - r.Name, allErrs) -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *Pooler) ValidateDelete() (admission.Warnings, error) { - poolerLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace) - return nil, nil -} - -func (r *Pooler) validatePgBouncer() field.ErrorList { - var result field.ErrorList - switch { - case r.Spec.PgBouncer == nil: - result = append(result, - field.Invalid( - field.NewPath("spec", "pgbouncer"), - "", "required pgbouncer configuration")) - case r.Spec.PgBouncer.AuthQuerySecret != nil && r.Spec.PgBouncer.AuthQuerySecret.Name != "" && - r.Spec.PgBouncer.AuthQuery == "": - result = append(result, - field.Invalid( - field.NewPath("spec", "pgbouncer", "authQuery"), - "", "must specify an auth query when providing an auth query secret")) - case (r.Spec.PgBouncer.AuthQuerySecret == nil || r.Spec.PgBouncer.AuthQuerySecret.Name == "") && - r.Spec.PgBouncer.AuthQuery != "": - result = append(result, - field.Invalid( - field.NewPath("spec", "pgbouncer", "authQuerySecret", "name"), - "", "must specify an existing auth query secret when providing an auth query secret")) - } - - if r.Spec.PgBouncer != nil && len(r.Spec.PgBouncer.Parameters) > 0 { - result = append(result, r.validatePgbouncerGenericParameters()...) - } - - return result -} - -func (r *Pooler) validateCluster() field.ErrorList { - var result field.ErrorList - if r.Spec.Cluster.Name == "" { - result = append(result, - field.Invalid( - field.NewPath("spec", "cluster", "name"), - "", "must specify a cluster name")) - } - if r.Spec.Cluster.Name == r.Name { - result = append(result, - field.Invalid( - field.NewPath("metadata", "name"), - r.Name, "the pooler resource cannot have the same name of a cluster")) - } - return result -} - -// Validate validates the configuration of a Pooler, returning -// a list of errors -func (r *Pooler) Validate() (allErrs field.ErrorList) { - allErrs = append(allErrs, r.validatePgBouncer()...) - allErrs = append(allErrs, r.validateCluster()...) - return allErrs -} - -// validatePgbouncerGenericParameters validates pgbouncer parameters -func (r *Pooler) validatePgbouncerGenericParameters() field.ErrorList { - var result field.ErrorList - - for param := range r.Spec.PgBouncer.Parameters { - if !AllowedPgbouncerGenericConfigurationParameters.Has(param) { - result = append(result, - field.Invalid( - field.NewPath("spec", "cluster", "parameters"), - param, "Invalid or reserved parameter")) - } - } - return result -} diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go new file mode 100644 index 0000000000..b74947674f --- /dev/null +++ b/api/v1/publication_funcs.go @@ -0,0 +1,80 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" +) + +// SetAsFailed sets the publication as failed with the given error +func (pub *Publication) SetAsFailed(err error) { + pub.Status.Applied = ptr.To(false) + pub.Status.Message = err.Error() +} + +// SetAsUnknown sets the publication as unknown with the given error +func (pub *Publication) SetAsUnknown(err error) { + pub.Status.Applied = nil + pub.Status.Message = err.Error() +} + +// SetAsReady sets the publication as working correctly +func (pub *Publication) SetAsReady() { + pub.Status.Applied = ptr.To(true) + pub.Status.Message = "" + pub.Status.ObservedGeneration = pub.Generation +} + +// GetStatusMessage returns the status message of the publication +func (pub *Publication) GetStatusMessage() string { + return pub.Status.Message +} + +// GetClusterRef returns the cluster reference of the publication +func (pub *Publication) GetClusterRef() corev1.LocalObjectReference { + return pub.Spec.ClusterRef +} + +// GetManagedObjectName returns the name of the managed publication object +func (pub *Publication) GetManagedObjectName() string { + return pub.Spec.Name +} + +// HasReconciliations returns true if the publication has been reconciled at least once +func (pub *Publication) HasReconciliations() bool { + return pub.Status.ObservedGeneration > 0 +} + +// GetName returns the publication name +func (pub *Publication) GetName() string { + return pub.Name +} + +// SetStatusObservedGeneration sets the observed generation of the publication +func (pub *Publication) SetStatusObservedGeneration(obsGeneration int64) { + pub.Status.ObservedGeneration = obsGeneration +} + +// MustHaveManagedResourceExclusivity detects conflicting publications +func (pub *PublicationList) MustHaveManagedResourceExclusivity(reference *Publication) error { + pointers := toSliceWithPointers(pub.Items) + return ensureManagedResourceExclusivity(reference, pointers) +} diff --git a/api/v1/publication_types.go b/api/v1/publication_types.go new file mode 100644 index 0000000000..86edeb4157 --- /dev/null +++ b/api/v1/publication_types.go @@ -0,0 +1,165 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PublicationReclaimPolicy defines a policy for end-of-life maintenance of Publications. +// +enum +type PublicationReclaimPolicy string + +const ( + // PublicationReclaimDelete means the publication will be deleted from Kubernetes on release + // from its claim. + PublicationReclaimDelete PublicationReclaimPolicy = "delete" + + // PublicationReclaimRetain means the publication will be left in its current phase for manual + // reclamation by the administrator.
The default policy is Retain. + PublicationReclaimRetain PublicationReclaimPolicy = "retain" +) + +// PublicationSpec defines the desired state of Publication +type PublicationSpec struct { + // The name of the PostgreSQL cluster that identifies the "publisher" + ClusterRef corev1.LocalObjectReference `json:"cluster"` + + // The name of the publication inside PostgreSQL + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" + Name string `json:"name"` + + // The name of the database where the publication will be installed in + // the "publisher" cluster + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable" + DBName string `json:"dbname"` + + // Publication parameters part of the `WITH` clause as expected by + // PostgreSQL `CREATE PUBLICATION` command + // +optional + Parameters map[string]string `json:"parameters,omitempty"` + + // Target of the publication as expected by PostgreSQL `CREATE PUBLICATION` command + Target PublicationTarget `json:"target"` + + // The policy for end-of-life maintenance of this publication + // +kubebuilder:validation:Enum=delete;retain + // +kubebuilder:default:=retain + // +optional + ReclaimPolicy PublicationReclaimPolicy `json:"publicationReclaimPolicy,omitempty"` +} + +// PublicationTarget is what this publication should publish +// +kubebuilder:validation:XValidation:rule="(has(self.allTables) && !has(self.objects)) || (!has(self.allTables) && has(self.objects))",message="allTables and objects are mutually exclusive" +type PublicationTarget struct { + // Marks the publication as one that replicates changes for all tables + // in the database, including tables created in the future. + // Corresponding to `FOR ALL TABLES` in PostgreSQL. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="allTables is immutable" + // +optional + AllTables bool `json:"allTables,omitempty"` + + // Just the following schema objects + // +kubebuilder:validation:XValidation:rule="!(self.exists(o, has(o.table) && has(o.table.columns)) && self.exists(o, has(o.tablesInSchema)))",message="specifying a column list when the publication also publishes tablesInSchema is not supported" + // +kubebuilder:validation:MaxItems=100000 + // +optional + Objects []PublicationTargetObject `json:"objects,omitempty"` +} + +// PublicationTargetObject is an object to publish +// +kubebuilder:validation:XValidation:rule="(has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) && has(self.table))",message="tablesInSchema and table are mutually exclusive" +type PublicationTargetObject struct { + // Marks the publication as one that replicates changes for all tables + // in the specified list of schemas, including tables created in the + // future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. + // +optional + TablesInSchema string `json:"tablesInSchema,omitempty"` + + // Specifies a list of tables to add to the publication. Corresponding + // to `FOR TABLE` in PostgreSQL. 
+ // +optional + Table *PublicationTargetTable `json:"table,omitempty"` +} + +// PublicationTargetTable is a table to publish +type PublicationTargetTable struct { + // Whether to limit to the table only or include all its descendants + // +optional + Only bool `json:"only,omitempty"` + + // The table name + Name string `json:"name"` + + // The schema name + // +optional + Schema string `json:"schema,omitempty"` + + // The columns to publish + // +optional + Columns []string `json:"columns,omitempty"` +} + +// PublicationStatus defines the observed state of Publication +type PublicationStatus struct { + // A sequence number representing the latest + // desired state that was synchronized + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Applied is true if the publication was reconciled correctly + // +optional + Applied *bool `json:"applied,omitempty"` + + // Message is the reconciliation output message + // +optional + Message string `json:"message,omitempty"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name" +// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name" +// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message" + +// Publication is the Schema for the publications API +type Publication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec PublicationSpec `json:"spec"` + Status PublicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PublicationList contains a list of Publication +type PublicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Publication `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Publication{}, &PublicationList{}) +} diff --git a/api/v1/scheduledbackup_funcs.go b/api/v1/scheduledbackup_funcs.go new file mode 100644 index 0000000000..84ad3de840 --- /dev/null +++ b/api/v1/scheduledbackup_funcs.go @@ -0,0 +1,94 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
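The CEL rules above make allTables and objects mutually exclusive, and reject column lists when tablesInSchema entries are also published. A hypothetical Publication that stays within those rules (illustrative names; assumes this api/v1 package and its corev1/metav1 imports):

pub := Publication{
	ObjectMeta: metav1.ObjectMeta{Name: "pub-app", Namespace: "default"},
	Spec: PublicationSpec{
		ClusterRef: corev1.LocalObjectReference{Name: "cluster-example"},
		Name:       "app_pub",
		DBName:     "app",
		Target: PublicationTarget{
			Objects: []PublicationTargetObject{{
				Table: &PublicationTargetTable{Schema: "public", Name: "orders", Columns: []string{"id", "total"}},
			}},
		},
	},
}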
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// IsSuspended checks whether a scheduled backup has been suspended +func (scheduledBackup ScheduledBackup) IsSuspended() bool { + if scheduledBackup.Spec.Suspend == nil { + return false + } + + return *scheduledBackup.Spec.Suspend +} + +// IsImmediate checks whether a backup has to be issued immediately upon creation +func (scheduledBackup ScheduledBackup) IsImmediate() bool { + if scheduledBackup.Spec.Immediate == nil { + return false + } + + return *scheduledBackup.Spec.Immediate +} + +// GetName gets the scheduled backup name +func (scheduledBackup *ScheduledBackup) GetName() string { + return scheduledBackup.Name +} + +// GetNamespace gets the scheduled backup namespace +func (scheduledBackup *ScheduledBackup) GetNamespace() string { + return scheduledBackup.Namespace +} + +// GetSchedule gets the cron-like schedule of this scheduled backup +func (scheduledBackup *ScheduledBackup) GetSchedule() string { + return scheduledBackup.Spec.Schedule +} + +// GetStatus gets the status that the caller may update +func (scheduledBackup *ScheduledBackup) GetStatus() *ScheduledBackupStatus { + return &scheduledBackup.Status +} + +// CreateBackup creates a backup from this scheduled backup +func (scheduledBackup *ScheduledBackup) CreateBackup(name string) *Backup { + backup := Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: scheduledBackup.Namespace, + }, + Spec: BackupSpec{ + Cluster: scheduledBackup.Spec.Cluster, + Target: scheduledBackup.Spec.Target, + Method: scheduledBackup.Spec.Method, + Online: scheduledBackup.Spec.Online, + OnlineConfiguration: scheduledBackup.Spec.OnlineConfiguration, + PluginConfiguration: scheduledBackup.Spec.PluginConfiguration, + }, + } + utils.InheritAnnotations(&backup.ObjectMeta, scheduledBackup.Annotations, nil, configuration.Current) + + if backup.Annotations == nil { + backup.Annotations = make(map[string]string) + } + + if v := scheduledBackup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]; v != "" { + backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName] = v + } + + return &backup +} diff --git a/api/v1/scheduledbackup_types_test.go b/api/v1/scheduledbackup_funcs_test.go similarity index 71% rename from api/v1/scheduledbackup_types_test.go rename to api/v1/scheduledbackup_funcs_test.go index e68b20ba42..2f5ff2b0ab 100644 --- a/api/v1/scheduledbackup_types_test.go +++ b/api/v1/scheduledbackup_funcs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,23 +13,34 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 import ( - "k8s.io/utils/ptr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) var _ = Describe("Scheduled backup", func() { - scheduledBackup := &ScheduledBackup{} + var scheduledBackup *ScheduledBackup backupName := "test" + BeforeEach(func() { + scheduledBackup = &ScheduledBackup{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: make(map[string]string), + }, + } + }) + It("properly creates a backup with no annotations", func() { backup := scheduledBackup.CreateBackup("test") Expect(backup).ToNot(BeNil()) @@ -36,6 +48,14 @@ var _ = Describe("Scheduled backup", func() { Expect(backup.Annotations).To(BeEmpty()) }) + It("should always inherit volumeSnapshotDeadline while creating a backup", func() { + scheduledBackup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName] = "20" + backup := scheduledBackup.CreateBackup("test") + Expect(backup).ToNot(BeNil()) + Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName)) + Expect(backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]).To(BeEquivalentTo("20")) + }) + It("properly creates a backup with annotations", func() { annotations := make(map[string]string, 1) annotations["test"] = "annotations" @@ -64,30 +84,4 @@ var _ = Describe("Scheduled backup", func() { Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName)) Expect(backup.Spec.Target).To(BeEquivalentTo(BackupTargetPrimary)) }) - - It("complains if online is set on a barman backup", func() { - scheduledBackup := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Method: BackupMethodBarmanObjectStore, - Online: ptr.To(true), - Schedule: "* * * * * *", - }, - } - result := scheduledBackup.validate() - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.online")) - }) - - It("complains if onlineConfiguration is set on a barman backup", func() { - scheduledBackup := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Method: BackupMethodBarmanObjectStore, - OnlineConfiguration: &OnlineConfiguration{}, - Schedule: "* * * * * *", - }, - } - result := scheduledBackup.validate() - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) - }) }) diff --git a/api/v1/scheduledbackup_types.go b/api/v1/scheduledbackup_types.go index e75ce2f2bc..b17671e367 100644 --- a/api/v1/scheduledbackup_types.go +++ b/api/v1/scheduledbackup_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // ScheduledBackupSpec defines the desired state of ScheduledBackup @@ -128,69 +128,12 @@ type ScheduledBackupList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of scheduled backups Items []ScheduledBackup } -// IsSuspended check if a scheduled backup has been suspended or not -func (scheduledBackup ScheduledBackup) IsSuspended() bool { - if scheduledBackup.Spec.Suspend == nil { - return false - } - - return *scheduledBackup.Spec.Suspend -} - -// IsImmediate check if a backup has to be issued immediately upon creation or not -func (scheduledBackup ScheduledBackup) IsImmediate() bool { - if scheduledBackup.Spec.Immediate == nil { - return false - } - - return *scheduledBackup.Spec.Immediate -} - -// GetName gets the scheduled backup name -func (scheduledBackup *ScheduledBackup) GetName() string { - return scheduledBackup.Name -} - -// GetNamespace gets the scheduled backup name -func (scheduledBackup *ScheduledBackup) GetNamespace() string { - return scheduledBackup.Namespace -} - -// GetSchedule get the cron-like schedule of this scheduled backup -func (scheduledBackup *ScheduledBackup) GetSchedule() string { - return scheduledBackup.Spec.Schedule -} - -// GetStatus gets the status that the caller may update -func (scheduledBackup *ScheduledBackup) GetStatus() *ScheduledBackupStatus { - return &scheduledBackup.Status -} - -// CreateBackup creates a backup from this scheduled backup -func (scheduledBackup *ScheduledBackup) CreateBackup(name string) *Backup { - backup := Backup{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: scheduledBackup.Namespace, - }, - Spec: BackupSpec{ - Cluster: scheduledBackup.Spec.Cluster, - Target: scheduledBackup.Spec.Target, - Method: scheduledBackup.Spec.Method, - Online: scheduledBackup.Spec.Online, - OnlineConfiguration: scheduledBackup.Spec.OnlineConfiguration, - PluginConfiguration: scheduledBackup.Spec.PluginConfiguration, - }, - } - utils.InheritAnnotations(&backup.ObjectMeta, scheduledBackup.Annotations, nil, configuration.Current) - return &backup -} - func init() { SchemeBuilder.Register(&ScheduledBackup{}, &ScheduledBackupList{}) } diff --git a/api/v1/scheduledbackup_webhook.go b/api/v1/scheduledbackup_webhook.go deleted file mode 100644 index e1aebeedf5..0000000000 --- a/api/v1/scheduledbackup_webhook.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/robfig/cron" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// scheduledBackupLog is for logging in this package. 
-var scheduledBackupLog = log.WithName("scheduledbackup-resource").WithValues("version", "v1") - -// SetupWebhookWithManager setup the webhook inside the controller manager -func (r *ScheduledBackup) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} - -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-scheduledbackup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,verbs=create;update,versions=v1,name=mscheduledbackup.cnpg.io,sideEffects=None - -var _ webhook.Defaulter = &ScheduledBackup{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type -func (r *ScheduledBackup) Default() { - scheduledBackupLog.Info("default", "name", r.Name, "namespace", r.Namespace) -} - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-scheduledbackup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,versions=v1,name=vscheduledbackup.cnpg.io,sideEffects=None - -var _ webhook.Validator = &ScheduledBackup{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *ScheduledBackup) ValidateCreate() (admission.Warnings, error) { - var allErrs field.ErrorList - scheduledBackupLog.Info("validate create", "name", r.Name, "namespace", r.Namespace) - - allErrs = append(allErrs, r.validate()...) - - if len(allErrs) == 0 { - return nil, nil - } - - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "scheduledbackup.cnpg.io", Kind: "Backup"}, - r.Name, allErrs) -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *ScheduledBackup) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { - scheduledBackupLog.Info("validate update", "name", r.Name, "namespace", r.Namespace) - return nil, nil -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ScheduledBackup) ValidateDelete() (admission.Warnings, error) { - scheduledBackupLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace) - return nil, nil -} - -func (r *ScheduledBackup) validate() field.ErrorList { - var result field.ErrorList - - if _, err := cron.Parse(r.GetSchedule()); err != nil { - result = append(result, - field.Invalid( - field.NewPath("spec", "schedule"), - r.Spec.Schedule, err.Error())) - } - if r.Spec.Method == BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { - result = append(result, field.Invalid( - field.NewPath("spec", "method"), - r.Spec.Method, - "Cannot use volumeSnapshot backup method due to missing "+ - "VolumeSnapshot CRD. 
If you installed the CRD after having "+ - "started the operator, please restart it to enable "+ - "VolumeSnapshot support", - )) - } - - if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.Online != nil { - result = append(result, field.Invalid( - field.NewPath("spec", "online"), - r.Spec.Online, - "Online parameter can be specified only if the method is volumeSnapshot", - )) - } - - if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil { - result = append(result, field.Invalid( - field.NewPath("spec", "onlineConfiguration"), - r.Spec.OnlineConfiguration, - "OnlineConfiguration parameter can be specified only if the method is volumeSnapshot", - )) - } - - return result -} diff --git a/api/v1/scheduledbackup_webhook_test.go b/api/v1/scheduledbackup_webhook_test.go deleted file mode 100644 index b31e954741..0000000000 --- a/api/v1/scheduledbackup_webhook_test.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Validate schedule", func() { - It("doesn't complain if there's a schedule", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "0 0 0 * * *", - }, - } - - result := schedule.validate() - Expect(result).To(BeEmpty()) - }) - - It("complain with a wrong time", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "0 0 0 * * * 1996", - }, - } - - result := schedule.validate() - Expect(result).To(HaveLen(1)) - }) - - It("doesn't complain if VolumeSnapshot CRD is present", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "0 0 0 * * *", - Method: BackupMethodVolumeSnapshot, - }, - } - utils.SetVolumeSnapshot(true) - result := schedule.validate() - Expect(result).To(BeEmpty()) - }) - - It("complains if VolumeSnapshot CRD is not present", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "0 0 0 * * *", - Method: BackupMethodVolumeSnapshot, - }, - } - utils.SetVolumeSnapshot(false) - result := schedule.validate() - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.method")) - }) -}) diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go new file mode 100644 index 0000000000..340cbb2d07 --- /dev/null +++ b/api/v1/subscription_funcs.go @@ -0,0 +1,80 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" +) + +// SetAsFailed sets the subscription as failed with the given error +func (sub *Subscription) SetAsFailed(err error) { + sub.Status.Applied = ptr.To(false) + sub.Status.Message = err.Error() +} + +// SetAsUnknown sets the subscription as unknown with the given error +func (sub *Subscription) SetAsUnknown(err error) { + sub.Status.Applied = nil + sub.Status.Message = err.Error() +} + +// SetAsReady sets the subscription as working correctly +func (sub *Subscription) SetAsReady() { + sub.Status.Applied = ptr.To(true) + sub.Status.Message = "" + sub.Status.ObservedGeneration = sub.Generation +} + +// GetStatusMessage returns the status message of the subscription +func (sub *Subscription) GetStatusMessage() string { + return sub.Status.Message +} + +// GetClusterRef returns the cluster reference of the subscription +func (sub *Subscription) GetClusterRef() corev1.LocalObjectReference { + return sub.Spec.ClusterRef +} + +// GetName returns the subscription object name +func (sub *Subscription) GetName() string { + return sub.Name +} + +// GetManagedObjectName returns the name of the managed subscription object +func (sub *Subscription) GetManagedObjectName() string { + return sub.Spec.Name +} + +// HasReconciliations returns true if the subscription has been reconciled at least once +func (sub *Subscription) HasReconciliations() bool { + return sub.Status.ObservedGeneration > 0 +} + +// SetStatusObservedGeneration sets the observed generation of the subscription +func (sub *Subscription) SetStatusObservedGeneration(obsGeneration int64) { + sub.Status.ObservedGeneration = obsGeneration +} + +// MustHaveManagedResourceExclusivity detects conflicting subscriptions +func (sub *SubscriptionList) MustHaveManagedResourceExclusivity(reference *Subscription) error { + pointers := toSliceWithPointers(sub.Items) + return ensureManagedResourceExclusivity(reference, pointers) +} diff --git a/api/v1/subscription_types.go b/api/v1/subscription_types.go new file mode 100644 index 0000000000..adf9f70f18 --- /dev/null +++ b/api/v1/subscription_types.go @@ -0,0 +1,127 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SubscriptionReclaimPolicy describes a policy for end-of-life maintenance of Subscriptions. 
+// +enum +type SubscriptionReclaimPolicy string + +const ( + // SubscriptionReclaimDelete means the subscription will be deleted from Kubernetes on release + // from its claim. + SubscriptionReclaimDelete SubscriptionReclaimPolicy = "delete" + + // SubscriptionReclaimRetain means the subscription will be left in its current phase for manual + // reclamation by the administrator. The default policy is Retain. + SubscriptionReclaimRetain SubscriptionReclaimPolicy = "retain" +) + +// SubscriptionSpec defines the desired state of Subscription +type SubscriptionSpec struct { + // The name of the PostgreSQL cluster that identifies the "subscriber" + ClusterRef corev1.LocalObjectReference `json:"cluster"` + + // The name of the subscription inside PostgreSQL + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" + Name string `json:"name"` + + // The name of the database in the "subscriber" cluster where + // the subscription will be installed + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable" + DBName string `json:"dbname"` + + // Subscription parameters included in the `WITH` clause of the PostgreSQL + // `CREATE SUBSCRIPTION` command. Most parameters cannot be changed + // after the subscription is created and will be ignored if modified + // later, except for a limited set documented at: + // https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + // +optional + Parameters map[string]string `json:"parameters,omitempty"` + + // The name of the publication inside the PostgreSQL database in the + // "publisher" + PublicationName string `json:"publicationName"` + + // The name of the database containing the publication on the external + // cluster. Defaults to the one in the external cluster definition. 
+ // +optional + PublicationDBName string `json:"publicationDBName,omitempty"` + + // The name of the external cluster with the publication ("publisher") + ExternalClusterName string `json:"externalClusterName"` + + // The policy for end-of-life maintenance of this subscription + // +kubebuilder:validation:Enum=delete;retain + // +kubebuilder:default:=retain + // +optional + ReclaimPolicy SubscriptionReclaimPolicy `json:"subscriptionReclaimPolicy,omitempty"` +} + +// SubscriptionStatus defines the observed state of Subscription +type SubscriptionStatus struct { + // A sequence number representing the latest + // desired state that was synchronized + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Applied is true if the subscription was reconciled correctly + // +optional + Applied *bool `json:"applied,omitempty"` + + // Message is the reconciliation output message + // +optional + Message string `json:"message,omitempty"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name" +// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name" +// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message" + +// Subscription is the Schema for the subscriptions API +type Subscription struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec SubscriptionSpec `json:"spec"` + Status SubscriptionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SubscriptionList contains a list of Subscription +type SubscriptionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Subscription `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Subscription{}, &SubscriptionList{}) +} diff --git a/api/v1/suite_test.go b/api/v1/suite_test.go index ef8c13c144..63bb2acfcc 100644 --- a/api/v1/suite_test.go +++ b/api/v1/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 8b0a114f63..65c47bb873 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1,7 +1,8 @@ //go:build !ignore_autogenerated /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +15,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
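As with Publication, a short sketch of a Subscription manifest may help. The cluster, database, and publication names below are hypothetical, and copy_data is a standard CREATE SUBSCRIPTION parameter:

apiVersion: postgresql.cnpg.io/v1
kind: Subscription
metadata:
  name: subscription-sample
spec:
  cluster:
    name: cluster-destination
  name: sub_orders
  dbname: app
  publicationName: pub_orders
  # Must match an externalClusters entry on the subscriber cluster
  externalClusterName: cluster-source
  parameters:
    copy_data: "true"

The connection string to the publisher is derived from the named external cluster, which is why externalClusterName is required while publicationDBName may default to the database named in the external cluster definition.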
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Code generated by controller-gen. DO NOT EDIT. @@ -21,9 +24,6 @@ limitations under the License. package v1 import ( - pkgapi "github.com/cloudnative-pg/barman-cloud/pkg/api" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/machinery/pkg/api" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -132,7 +132,7 @@ func (in *BackupConfiguration) DeepCopyInto(out *BackupConfiguration) { } if in.BarmanObjectStore != nil { in, out := &in.BarmanObjectStore, &out.BarmanObjectStore - *out = new(pkgapi.BarmanObjectStoreConfiguration) + *out = new(BarmanObjectStoreConfiguration) (*in).DeepCopyInto(*out) } } @@ -239,10 +239,10 @@ func (in *BackupSnapshotStatus) DeepCopy() *BackupSnapshotStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupSource) DeepCopyInto(out *BackupSource) { *out = *in - out.LocalObjectReference = in.LocalObjectReference + in.LocalObjectReference.DeepCopyInto(&out.LocalObjectReference) if in.EndpointCA != nil { in, out := &in.EndpointCA, &out.EndpointCA - *out = new(api.SecretKeySelector) + *out = new(SecretKeySelector) **out = **in } } @@ -260,7 +260,7 @@ func (in *BackupSource) DeepCopy() *BackupSource { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { *out = *in - out.Cluster = in.Cluster + in.Cluster.DeepCopyInto(&out.Cluster) if in.PluginConfiguration != nil { in, out := &in.PluginConfiguration, &out.PluginConfiguration *out = new(BackupPluginConfiguration) @@ -294,7 +294,7 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { in.BarmanCredentials.DeepCopyInto(&out.BarmanCredentials) if in.EndpointCA != nil { in, out := &in.EndpointCA, &out.EndpointCA - *out = new(api.SecretKeySelector) + *out = new(SecretKeySelector) **out = **in } if in.StartedAt != nil { @@ -326,6 +326,13 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { *out = new(bool) **out = **in } + if in.PluginMetadata != nil { + in, out := &in.PluginMetadata, &out.PluginMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. 
@@ -373,7 +380,7 @@ func (in *BootstrapInitDB) DeepCopyInto(out *BootstrapInitDB) { *out = *in if in.Secret != nil { in, out := &in.Secret, &out.Secret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } if in.Options != nil { @@ -438,7 +445,7 @@ func (in *BootstrapPgBaseBackup) DeepCopyInto(out *BootstrapPgBaseBackup) { *out = *in if in.Secret != nil { in, out := &in.Secret, &out.Secret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } } @@ -473,7 +480,7 @@ func (in *BootstrapRecovery) DeepCopyInto(out *BootstrapRecovery) { } if in.Secret != nil { in, out := &in.Secret, &out.Secret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } } @@ -709,7 +716,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { } if in.SuperuserSecret != nil { in, out := &in.SuperuserSecret, &out.SuperuserSecret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } if in.EnableSuperuserAccess != nil { @@ -724,7 +731,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]api.LocalObjectReference, len(*in)) + *out = make([]LocalObjectReference, len(*in)) copy(*out, *in) } in.StorageConfiguration.DeepCopyInto(&out.StorageConfiguration) @@ -832,11 +839,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { } if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins - *out = make(PluginConfigurationList, len(*in)) + *out = make([]PluginConfiguration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Probes != nil { + in, out := &in.Probes, &out.Probes + *out = new(ProbesConfiguration) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. @@ -854,7 +866,7 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = *in if in.InstancesStatus != nil { in, out := &in.InstancesStatus, &out.InstancesStatus - *out = make(map[utils.PodStatus][]string, len(*in)) + *out = make(map[PodStatus][]string, len(*in)) for key, val := range *in { var outVal []string if val == nil { @@ -946,6 +958,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.PGDataImageInfo != nil { + in, out := &in.PGDataImageInfo, &out.PGDataImageInfo + *out = new(ImageInfo) + **out = **in + } if in.PluginStatus != nil { in, out := &in.PluginStatus, &out.PluginStatus *out = make([]PluginStatus, len(*in)) @@ -1022,7 +1039,7 @@ func (in *Database) DeepCopyInto(out *Database) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database. @@ -1075,6 +1092,36 @@ func (in *DatabaseList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObjectSpec) DeepCopyInto(out *DatabaseObjectSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObjectSpec. 
+func (in *DatabaseObjectSpec) DeepCopy() *DatabaseObjectSpec { + if in == nil { + return nil + } + out := new(DatabaseObjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObjectStatus) DeepCopyInto(out *DatabaseObjectStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObjectStatus. +func (in *DatabaseObjectStatus) DeepCopy() *DatabaseObjectStatus { + if in == nil { + return nil + } + out := new(DatabaseObjectStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DatabaseRoleRef) DeepCopyInto(out *DatabaseRoleRef) { *out = *in @@ -1109,6 +1156,23 @@ func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) { *out = new(int) **out = **in } + if in.Schemas != nil { + in, out := &in.Schemas, &out.Schemas + *out = make([]SchemaSpec, len(*in)) + copy(*out, *in) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]ExtensionSpec, len(*in)) + copy(*out, *in) + } + if in.FDWs != nil { + in, out := &in.FDWs, &out.FDWs + *out = make([]FDWSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec. @@ -1124,6 +1188,26 @@ func (in *DatabaseSpec) DeepCopy() *DatabaseSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) { *out = *in + if in.Applied != nil { + in, out := &in.Applied, &out.Applied + *out = new(bool) + **out = **in + } + if in.Schemas != nil { + in, out := &in.Schemas, &out.Schemas + *out = make([]DatabaseObjectStatus, len(*in)) + copy(*out, *in) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]DatabaseObjectStatus, len(*in)) + copy(*out, *in) + } + if in.FDWs != nil { + in, out := &in.FDWs, &out.FDWs + *out = make([]DatabaseObjectStatus, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus. @@ -1190,6 +1274,53 @@ func (in *EphemeralVolumesSizeLimitConfiguration) DeepCopy() *EphemeralVolumesSi return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionConfiguration) DeepCopyInto(out *ExtensionConfiguration) { + *out = *in + out.ImageVolumeSource = in.ImageVolumeSource + if in.ExtensionControlPath != nil { + in, out := &in.ExtensionControlPath, &out.ExtensionControlPath + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DynamicLibraryPath != nil { + in, out := &in.DynamicLibraryPath, &out.DynamicLibraryPath + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LdLibraryPath != nil { + in, out := &in.LdLibraryPath, &out.LdLibraryPath + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionConfiguration. 
+func (in *ExtensionConfiguration) DeepCopy() *ExtensionConfiguration { + if in == nil { + return nil + } + out := new(ExtensionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionSpec) DeepCopyInto(out *ExtensionSpec) { + *out = *in + out.DatabaseObjectSpec = in.DatabaseObjectSpec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionSpec. +func (in *ExtensionSpec) DeepCopy() *ExtensionSpec { + if in == nil { + return nil + } + out := new(ExtensionSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalCluster) DeepCopyInto(out *ExternalCluster) { *out = *in @@ -1222,7 +1353,12 @@ func (in *ExternalCluster) DeepCopyInto(out *ExternalCluster) { } if in.BarmanObjectStore != nil { in, out := &in.BarmanObjectStore, &out.BarmanObjectStore - *out = new(pkgapi.BarmanObjectStoreConfiguration) + *out = new(BarmanObjectStoreConfiguration) + (*in).DeepCopyInto(*out) + } + if in.PluginConfiguration != nil { + in, out := &in.PluginConfiguration, &out.PluginConfiguration + *out = new(PluginConfiguration) (*in).DeepCopyInto(*out) } } @@ -1237,6 +1373,110 @@ func (in *ExternalCluster) DeepCopy() *ExternalCluster { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FDWSpec) DeepCopyInto(out *FDWSpec) { + *out = *in + out.DatabaseObjectSpec = in.DatabaseObjectSpec + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]OptionSpec, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]UsageSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FDWSpec. +func (in *FDWSpec) DeepCopy() *FDWSpec { + if in == nil { + return nil + } + out := new(FDWSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverQuorum) DeepCopyInto(out *FailoverQuorum) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverQuorum. +func (in *FailoverQuorum) DeepCopy() *FailoverQuorum { + if in == nil { + return nil + } + out := new(FailoverQuorum) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FailoverQuorum) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverQuorumList) DeepCopyInto(out *FailoverQuorumList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FailoverQuorum, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverQuorumList. 
+func (in *FailoverQuorumList) DeepCopy() *FailoverQuorumList { + if in == nil { + return nil + } + out := new(FailoverQuorumList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FailoverQuorumList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverQuorumStatus) DeepCopyInto(out *FailoverQuorumStatus) { + *out = *in + if in.StandbyNames != nil { + in, out := &in.StandbyNames, &out.StandbyNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverQuorumStatus. +func (in *FailoverQuorumStatus) DeepCopy() *FailoverQuorumStatus { + if in == nil { + return nil + } + out := new(FailoverQuorumStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageCatalog) DeepCopyInto(out *ImageCatalog) { *out = *in @@ -1331,6 +1571,21 @@ func (in *ImageCatalogSpec) DeepCopy() *ImageCatalogSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageInfo) DeepCopyInto(out *ImageInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInfo. +func (in *ImageInfo) DeepCopy() *ImageInfo { + if in == nil { + return nil + } + out := new(ImageInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Import) DeepCopyInto(out *Import) { *out = *in @@ -1350,6 +1605,16 @@ func (in *Import) DeepCopyInto(out *Import) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.PgDumpExtraOptions != nil { + in, out := &in.PgDumpExtraOptions, &out.PgDumpExtraOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PgRestoreExtraOptions != nil { + in, out := &in.PgRestoreExtraOptions, &out.PgRestoreExtraOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Import. @@ -1407,6 +1672,26 @@ func (in *InstanceReportedState) DeepCopy() *InstanceReportedState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsolationCheckConfiguration) DeepCopyInto(out *IsolationCheckConfiguration) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsolationCheckConfiguration. +func (in *IsolationCheckConfiguration) DeepCopy() *IsolationCheckConfiguration { + if in == nil { + return nil + } + out := new(IsolationCheckConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LDAPBindAsAuth) DeepCopyInto(out *LDAPBindAsAuth) { *out = *in @@ -1467,6 +1752,27 @@ func (in *LDAPConfig) DeepCopy() *LDAPConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LivenessProbe) DeepCopyInto(out *LivenessProbe) { + *out = *in + in.Probe.DeepCopyInto(&out.Probe) + if in.IsolationCheck != nil { + in, out := &in.IsolationCheck, &out.IsolationCheck + *out = new(IsolationCheckConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LivenessProbe. +func (in *LivenessProbe) DeepCopy() *LivenessProbe { + if in == nil { + return nil + } + out := new(LivenessProbe) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedConfiguration) DeepCopyInto(out *ManagedConfiguration) { *out = *in @@ -1630,12 +1936,12 @@ func (in *MonitoringConfiguration) DeepCopyInto(out *MonitoringConfiguration) { } if in.CustomQueriesConfigMap != nil { in, out := &in.CustomQueriesConfigMap, &out.CustomQueriesConfigMap - *out = make([]api.ConfigMapKeySelector, len(*in)) + *out = make([]ConfigMapKeySelector, len(*in)) copy(*out, *in) } if in.CustomQueriesSecret != nil { in, out := &in.CustomQueriesSecret, &out.CustomQueriesSecret - *out = make([]api.SecretKeySelector, len(*in)) + *out = make([]SecretKeySelector, len(*in)) copy(*out, *in) } if in.TLSConfig != nil { @@ -1714,6 +2020,37 @@ func (in *OnlineConfiguration) DeepCopy() *OnlineConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionSpec) DeepCopyInto(out *OptionSpec) { + *out = *in + out.OptionSpecValue = in.OptionSpecValue +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionSpec. +func (in *OptionSpec) DeepCopy() *OptionSpec { + if in == nil { + return nil + } + out := new(OptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionSpecValue) DeepCopyInto(out *OptionSpecValue) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionSpecValue. +func (in *OptionSpecValue) DeepCopy() *OptionSpecValue { + if in == nil { + return nil + } + out := new(OptionSpecValue) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PasswordState) DeepCopyInto(out *PasswordState) { *out = *in @@ -1770,7 +2107,7 @@ func (in *PgBouncerSpec) DeepCopyInto(out *PgBouncerSpec) { *out = *in if in.AuthQuerySecret != nil { in, out := &in.AuthQuerySecret, &out.AuthQuerySecret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } if in.Parameters != nil { @@ -1810,6 +2147,11 @@ func (in *PluginConfiguration) DeepCopyInto(out *PluginConfiguration) { *out = new(bool) **out = **in } + if in.IsWALArchiver != nil { + in, out := &in.IsWALArchiver, &out.IsWALArchiver + *out = new(bool) + **out = **in + } if in.Parameters != nil { in, out := &in.Parameters, &out.Parameters *out = make(map[string]string, len(*in)) @@ -1829,27 +2171,6 @@ func (in *PluginConfiguration) DeepCopy() *PluginConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in PluginConfigurationList) DeepCopyInto(out *PluginConfigurationList) { - { - in := &in - *out = make(PluginConfigurationList, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfigurationList. -func (in PluginConfigurationList) DeepCopy() PluginConfigurationList { - if in == nil { - return nil - } - out := new(PluginConfigurationList) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PluginStatus) DeepCopyInto(out *PluginStatus) { *out = *in @@ -1873,6 +2194,11 @@ func (in *PluginStatus) DeepCopyInto(out *PluginStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RestoreJobHookCapabilities != nil { + in, out := &in.RestoreJobHookCapabilities, &out.RestoreJobHookCapabilities + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginStatus. @@ -2053,7 +2379,7 @@ func (in *PoolerSecrets) DeepCopy() *PoolerSecrets { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PoolerSpec) DeepCopyInto(out *PoolerSpec) { *out = *in - out.Cluster = in.Cluster + in.Cluster.DeepCopyInto(&out.Cluster) if in.Instances != nil { in, out := &in.Instances, &out.Instances *out = new(int32) @@ -2152,9 +2478,16 @@ func (in *PostgresConfiguration) DeepCopyInto(out *PostgresConfiguration) { *out = new(LDAPConfig) (*in).DeepCopyInto(*out) } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresConfiguration. + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]ExtensionConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresConfiguration. func (in *PostgresConfiguration) DeepCopy() *PostgresConfiguration { if in == nil { return nil @@ -2164,6 +2497,242 @@ func (in *PostgresConfiguration) DeepCopy() *PostgresConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeWithStrategy) DeepCopyInto(out *ProbeWithStrategy) { + *out = *in + in.Probe.DeepCopyInto(&out.Probe) + if in.MaximumLag != nil { + in, out := &in.MaximumLag, &out.MaximumLag + x := (*in).DeepCopy() + *out = &x + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeWithStrategy. +func (in *ProbeWithStrategy) DeepCopy() *ProbeWithStrategy { + if in == nil { + return nil + } + out := new(ProbeWithStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbesConfiguration) DeepCopyInto(out *ProbesConfiguration) { + *out = *in + if in.Startup != nil { + in, out := &in.Startup, &out.Startup + *out = new(ProbeWithStrategy) + (*in).DeepCopyInto(*out) + } + if in.Liveness != nil { + in, out := &in.Liveness, &out.Liveness + *out = new(LivenessProbe) + (*in).DeepCopyInto(*out) + } + if in.Readiness != nil { + in, out := &in.Readiness, &out.Readiness + *out = new(ProbeWithStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbesConfiguration. +func (in *ProbesConfiguration) DeepCopy() *ProbesConfiguration { + if in == nil { + return nil + } + out := new(ProbesConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Publication) DeepCopyInto(out *Publication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Publication. +func (in *Publication) DeepCopy() *Publication { + if in == nil { + return nil + } + out := new(Publication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Publication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationList) DeepCopyInto(out *PublicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Publication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationList. 
+func (in *PublicationList) DeepCopy() *PublicationList { + if in == nil { + return nil + } + out := new(PublicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PublicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationSpec) DeepCopyInto(out *PublicationSpec) { + *out = *in + out.ClusterRef = in.ClusterRef + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Target.DeepCopyInto(&out.Target) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationSpec. +func (in *PublicationSpec) DeepCopy() *PublicationSpec { + if in == nil { + return nil + } + out := new(PublicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationStatus) DeepCopyInto(out *PublicationStatus) { + *out = *in + if in.Applied != nil { + in, out := &in.Applied, &out.Applied + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationStatus. +func (in *PublicationStatus) DeepCopy() *PublicationStatus { + if in == nil { + return nil + } + out := new(PublicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationTarget) DeepCopyInto(out *PublicationTarget) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]PublicationTargetObject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTarget. +func (in *PublicationTarget) DeepCopy() *PublicationTarget { + if in == nil { + return nil + } + out := new(PublicationTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationTargetObject) DeepCopyInto(out *PublicationTargetObject) { + *out = *in + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(PublicationTargetTable) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTargetObject. +func (in *PublicationTargetObject) DeepCopy() *PublicationTargetObject { + if in == nil { + return nil + } + out := new(PublicationTargetObject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationTargetTable) DeepCopyInto(out *PublicationTargetTable) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTargetTable. 
+func (in *PublicationTargetTable) DeepCopy() *PublicationTargetTable { + if in == nil { + return nil + } + out := new(PublicationTargetTable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RecoveryTarget) DeepCopyInto(out *RecoveryTarget) { *out = *in @@ -2264,7 +2833,7 @@ func (in *RoleConfiguration) DeepCopyInto(out *RoleConfiguration) { *out = *in if in.PasswordSecret != nil { in, out := &in.PasswordSecret, &out.PasswordSecret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } if in.ValidUntil != nil { @@ -2293,33 +2862,17 @@ func (in *RoleConfiguration) DeepCopy() *RoleConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateStatus) DeepCopyInto(out *RollingUpdateStatus) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatus. -func (in *RollingUpdateStatus) DeepCopy() *RollingUpdateStatus { - if in == nil { - return nil - } - out := new(RollingUpdateStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SQLRefs) DeepCopyInto(out *SQLRefs) { *out = *in if in.SecretRefs != nil { in, out := &in.SecretRefs, &out.SecretRefs - *out = make([]api.SecretKeySelector, len(*in)) + *out = make([]SecretKeySelector, len(*in)) copy(*out, *in) } if in.ConfigMapRefs != nil { in, out := &in.ConfigMapRefs, &out.ConfigMapRefs - *out = make([]api.ConfigMapKeySelector, len(*in)) + *out = make([]ConfigMapKeySelector, len(*in)) copy(*out, *in) } } @@ -2406,7 +2959,7 @@ func (in *ScheduledBackupSpec) DeepCopyInto(out *ScheduledBackupSpec) { *out = new(bool) **out = **in } - out.Cluster = in.Cluster + in.Cluster.DeepCopyInto(&out.Cluster) if in.PluginConfiguration != nil { in, out := &in.PluginConfiguration, &out.PluginConfiguration *out = new(BackupPluginConfiguration) @@ -2461,6 +3014,22 @@ func (in *ScheduledBackupStatus) DeepCopy() *ScheduledBackupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaSpec) DeepCopyInto(out *SchemaSpec) { + *out = *in + out.DatabaseObjectSpec = in.DatabaseObjectSpec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaSpec. +func (in *SchemaSpec) DeepCopy() *SchemaSpec { + if in == nil { + return nil + } + out := new(SchemaSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretVersion) DeepCopyInto(out *SecretVersion) { *out = *in @@ -2575,6 +3144,108 @@ func (in *StorageConfiguration) DeepCopy() *StorageConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subscription) DeepCopyInto(out *Subscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription. 
+func (in *Subscription) DeepCopy() *Subscription { + if in == nil { + return nil + } + out := new(Subscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Subscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Subscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList. +func (in *SubscriptionList) DeepCopy() *SubscriptionList { + if in == nil { + return nil + } + out := new(SubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { + *out = *in + out.ClusterRef = in.ClusterRef + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. +func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec { + if in == nil { + return nil + } + out := new(SubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { + *out = *in + if in.Applied != nil { + in, out := &in.Applied, &out.Applied + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus. +func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus { + if in == nil { + return nil + } + out := new(SubscriptionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SwitchReplicaClusterStatus) DeepCopyInto(out *SwitchReplicaClusterStatus) { *out = *in @@ -2623,7 +3294,6 @@ func (in *SynchronizeReplicasConfiguration) DeepCopyInto(out *SynchronizeReplica *out = make([]string, len(*in)) copy(*out, *in) } - in.synchronizeReplicasCache.DeepCopyInto(&out.synchronizeReplicasCache) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynchronizeReplicasConfiguration. @@ -2731,6 +3401,21 @@ func (in *Topology) DeepCopy() *Topology { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsageSpec) DeepCopyInto(out *UsageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsageSpec. 
+func (in *UsageSpec) DeepCopy() *UsageSpec { + if in == nil { + return nil + } + out := new(UsageSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSnapshotConfiguration) DeepCopyInto(out *VolumeSnapshotConfiguration) { *out = *in diff --git a/cmd/kubectl-cnpg/main.go b/cmd/kubectl-cnpg/main.go index b19d0f746f..09069bad4e 100644 --- a/cmd/kubectl-cnpg/main.go +++ b/cmd/kubectl-cnpg/main.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ /* @@ -66,8 +69,6 @@ func main() { PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { logFlags.ConfigureLogging() - plugin.ConfigureColor(cmd) - // If we're invoking the completion command we shouldn't try to create // a Kubernetes client and we just let the Cobra flow to continue if cmd.Name() == "completion" || cmd.Name() == "version" || @@ -75,6 +76,8 @@ func main() { return nil } + plugin.ConfigureColor(cmd) + return plugin.SetupKubernetesClient(configFlags) }, } @@ -82,6 +85,33 @@ func main() { logFlags.AddFlags(rootCmd.PersistentFlags()) configFlags.AddFlags(rootCmd.PersistentFlags()) + adminGroup := &cobra.Group{ + ID: plugin.GroupIDAdmin, + Title: "Operator-level administration", + } + + troubleshootingGroup := &cobra.Group{ + ID: plugin.GroupIDTroubleshooting, + Title: "Troubleshooting", + } + + pgClusterGroup := &cobra.Group{ + ID: plugin.GroupIDCluster, + Title: "Cluster administration", + } + + pgDatabaseGroup := &cobra.Group{ + ID: plugin.GroupIDDatabase, + Title: "Database administration", + } + + miscGroup := &cobra.Group{ + ID: plugin.GroupIDMiscellaneous, + Title: "Miscellaneous", + } + + rootCmd.AddGroup(adminGroup, troubleshootingGroup, pgClusterGroup, pgDatabaseGroup, miscGroup) + subcommands := []*cobra.Command{ backup.NewCmd(), certificate.NewCmd(), diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 8fe8a46f68..86bb259a89 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ /* @@ -40,6 +43,8 @@ import ( ) func main() { + cobra.EnableTraverseRunHooks = true + logFlags := &log.Flags{} cmd := &cobra.Command{ diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index 96be8399da..c371fcefcf 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.19.0 name: backups.postgresql.cnpg.io spec: group: postgresql.cnpg.io @@ -33,7 +33,8 @@ spec: name: v1 schema: openAPIV3Schema: - description: Backup is the Schema for the backups API + description: A Backup resource is a request for a PostgreSQL backup by the + user. properties: apiVersion: description: |- @@ -141,6 +142,9 @@ spec: required: - cluster type: object + x-kubernetes-validations: + - message: BackupSpec is immutable once set + rule: oldSelf == self status: description: |- Most recently observed status of the backup. This data may not be up to @@ -318,6 +322,11 @@ spec: phase: description: The last backup status type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object s3Credentials: description: The credentials to use to upload data to S3 properties: diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml index 4581679377..af9fbd84e4 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.19.0 name: clusterimagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index ae2bdc9475..ba89604e0a 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.19.0 name: clusters.postgresql.cnpg.io spec: group: postgresql.cnpg.io @@ -146,7 +146,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -161,7 +160,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -327,7 +325,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. 
- This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -342,7 +339,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -435,8 +431,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -505,7 +501,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -520,7 +515,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -686,7 +680,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -701,7 +694,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1152,10 +1144,10 @@ spec: description: |- Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2` or `snappy`. + compression, default), `gzip`, `bzip2`, and `snappy`. enum: - - gzip - bzip2 + - gzip - snappy type: string encryption: @@ -1343,11 +1335,15 @@ spec: compression: description: |- Compress a WAL file before sending it to the object store. Available - options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. enum: - - gzip - bzip2 + - gzip + - lz4 - snappy + - xz + - zstd type: string encryption: description: |- @@ -1495,6 +1491,12 @@ spec: initdb: description: Bootstrap the cluster via initdb properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. 
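Several of these locale options interlock: the CEL validations that follow tie `builtinLocale`, `icuLocale`, and `icuRules` back to `localeProvider`. A minimal sketch of an initdb bootstrap using the builtin provider, assuming the usual `spec.bootstrap.initdb` nesting (not visible in this hunk) and an illustrative locale value:

```yaml
# Sketch: initdb bootstrap with the builtin locale provider (PostgreSQL 17+).
# The bootstrap path and the C.UTF-8 value are assumptions, not taken from this diff.
spec:
  bootstrap:
    initdb:
      localeProvider: builtin   # `icu` would instead enable icuLocale/icuRules
      builtinLocale: C.UTF-8    # only valid when localeProvider is `builtin`
      dataChecksums: true
      encoding: UTF8
```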
+ This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string dataChecksums: description: |- Whether the `-k` option should be passed to initdb, @@ -1508,6 +1510,18 @@ spec: description: The value to be passed as option `--encoding` for initdb (default:`UTF8`) type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. + type: string import: description: |- Bootstraps the new cluster by importing data from an existing PostgreSQL @@ -1518,6 +1532,24 @@ spec: items: type: string type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array postImportApplicationSQL: description: |- List of SQL queries to be executed as a superuser in the application @@ -1558,6 +1590,10 @@ spec: - source - type type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string localeCType: description: The value to be passed as option `--lc-ctype` for initdb (default:`C`) @@ -1566,6 +1602,11 @@ spec: description: The value to be passed as option `--lc-collate` for initdb (default:`C`) type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string options: description: |- The list of options that must be passed to initdb when creating the cluster. @@ -1771,6 +1812,16 @@ spec: minimum: 1 type: integer type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' pg_basebackup: description: |- Bootstrap the cluster taking a physical backup of another compatible @@ -2076,7 +2127,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2134,6 +2187,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. 
+ properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2193,6 +2283,7 @@ spec: sources to the pods to be used by Env items: description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets properties: configMapRef: description: The ConfigMap to select from @@ -2212,8 +2303,9 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend to each key in - the ConfigMap. Must be a C_IDENTIFIER. + description: |- + Optional text to prepend to the name of each environment variable. + May consist of any printable ASCII characters except '='. type: string secretRef: description: The Secret to select from @@ -2449,15 +2541,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: description: |- @@ -2599,10 +2689,10 @@ spec: description: |- Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2` or `snappy`. + compression, default), `gzip`, `bzip2`, and `snappy`. 
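To see how the data and WAL compression options combine in practice, here is a minimal sketch of a Cluster backup stanza. The `spec.backup.barmanObjectStore` path is referenced elsewhere in this schema, while the destination, cluster name, and instance count are placeholders:

```yaml
# Sketch: compressed base backups and WAL archiving to an object store.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  backup:
    barmanObjectStore:
      destinationPath: s3://backups/cluster-example  # placeholder
      data:
        compression: gzip   # base backups: gzip, bzip2, or snappy
      wal:
        compression: zstd   # WAL: gzip, bzip2, lz4, snappy, xz, or zstd
```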
enum: - - gzip - bzip2 + - gzip - snappy type: string encryption: @@ -2790,11 +2880,15 @@ spec: compression: description: |- Compress a WAL file before sending it to the object store. Available - options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. enum: - - gzip - bzip2 + - gzip + - lz4 - snappy + - xz + - zstd type: string encryption: description: |- @@ -2878,6 +2972,32 @@ spec: - key type: object x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object sslCert: description: |- The reference to an SSL certificate to be used to connect to this @@ -2985,9 +3105,6 @@ spec: description: The major version of PostgreSQL we want to use from the ImageCatalog type: integer - x-kubernetes-validations: - - message: Major is immutable - rule: self == oldSelf name: description: Name is the name of resource being referenced type: string @@ -3194,18 +3311,13 @@ spec: It includes the type of service and its associated template specification. properties: selectorType: - allOf: - - enum: - - rw - - r - - ro - - enum: - - rw - - r - - ro description: |- SelectorType specifies the type of selectors that the service will have. Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro type: string serviceTemplate: description: ServiceTemplate is the template specification @@ -3557,13 +3669,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -3898,6 +4009,12 @@ spec: default: true description: Enabled is true if this plugin will be used type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. 
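A minimal sketch of how such a plugin declaration could look in a Cluster spec. The enclosing `plugins` list key is assumed (this hunk only shows the entry's fields), and the plugin name and parameter are placeholders:

```yaml
# Sketch: delegating WAL archiving to an external plugin. Only one entry may
# set isWALArchiver, and not together with spec.backup.barmanObjectStore.
spec:
  plugins:
  - name: my-backup-plugin.example.com  # placeholder plugin name
    enabled: true
    isWALArchiver: true
    parameters:
      objectStoreName: my-store         # placeholder parameter
```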
+ type: boolean name: description: Name is the plugin name type: string @@ -3932,6 +4049,67 @@ spec: This should only be used for debugging and troubleshooting. Defaults to false. type: boolean + extensions: + description: The configuration of the extensions to be added + items: + description: |- + ExtensionConfiguration is the configuration used to add + PostgreSQL extensions to the Cluster. + properties: + dynamic_library_path: + description: |- + The list of directories inside the image which should be added to dynamic_library_path. + If not defined, defaults to "/lib". + items: + type: string + type: array + extension_control_path: + description: |- + The list of directories inside the image which should be added to extension_control_path. + If not defined, defaults to "/share". + items: + type: string + type: array + image: + description: The image containing the extension, required + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + x-kubernetes-validations: + - message: An image reference is required + rule: has(self.reference) + ld_library_path: + description: The list of directories inside the image which + should be added to ld_library_path. + items: + type: string + type: array + name: + description: The name of the extension, required + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - image + - name + type: object + type: array ldap: description: Options to specify LDAP configuration properties: @@ -4058,6 +4236,20 @@ spec: description: Configuration of the PostgreSQL synchronous replication feature properties: + dataDurability: + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). 
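Those durability semantics reduce to a small block in the cluster configuration. A sketch, assuming the `synchronous` section shown here sits under `spec.postgresql` and that `any` is an accepted value for `method` (the method enum is not visible in this hunk):

```yaml
# Sketch: quorum commit that keeps writes flowing when replicas are lost.
spec:
  postgresql:
    synchronous:
      method: any                  # assumed value; "method" and "number" are required
      number: 1
      dataDurability: preferred    # valid only with standbyNamesPre/Post unset
```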
+ enum: + - required + - preferred + type: string maxStandbyNamesFromCluster: description: |- Specifies the maximum number of local cluster pods that can be @@ -4102,6 +4294,12 @@ spec: - method - number type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) type: object primaryUpdateMethod: default: restart @@ -4130,6 +4328,211 @@ spec: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass for more information type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + isolationCheck: + description: |- + Configure the feature that extends the liveness probe for a primary + instance. In addition to the basic checks, this verifies whether the + primary is isolated from the Kubernetes API server and from its + replicas, ensuring that it can be safely shut down if network + partition or API unavailability is detected. Enabled by default. + properties: + connectionTimeout: + default: 1000 + description: Timeout in milliseconds for connections during + the primary isolation check + type: integer + enabled: + default: true + description: Whether primary isolation checking is enabled + for the liveness probe + type: boolean + requestTimeout: + default: 1000 + description: Timeout in milliseconds for requests during + the primary isolation check + type: integer + type: object + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
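To make the probe configuration concrete, a sketch of a `spec.probes` stanza. The isolation-check defaults and the `streaming` readiness strategy with its lag bound come from the fields documented in this section; the specific values are illustrative:

```yaml
# Sketch: primary isolation checking plus a lag-bounded readiness probe.
spec:
  probes:
    liveness:
      isolationCheck:
        enabled: true
        connectionTimeout: 1000  # milliseconds, schema default
        requestTimeout: 1000     # milliseconds, schema default
    readiness:
      type: streaming            # pg_isready | streaming | query
      maximumLag: 32Mi           # quantity; honored only by the streaming strategy
```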
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. 
Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object projectedVolumeTemplate: description: |- Template to be used to define projected volumes, projected volumes will be mounted @@ -4146,10 +4549,13 @@ spec: format: int32 type: integer sources: - description: sources is the list of volume projections + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. items: - description: Projection that may be projected along with other - supported volume types + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. properties: clusterTrustBundle: description: |- @@ -4375,6 +4781,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. 
The first PEM
+ entry is the private key (in PKCS#8 format), and the remaining PEM
+ entries are the certificate chain issued by the signer (typically,
+ signers will return their certificate chain in leaf-to-root order).
+
+ Prefer using the credential bundle format, since your application code
+ can read it atomically. If you use keyPath and certificateChainPath,
+ your application must make two separate file reads. If these coincide
+ with a certificate rotation, it is possible that the private key and leaf
+ certificate you read may not correspond to each other. Your application
+ will need to check for this condition, and re-read until they are
+ consistent.
+
+ The named signer chooses the format of the certificate it
+ issues; consult the signer implementation's documentation to learn how to
+ use the certificates it issues.
+ properties:
+ certificateChainPath:
+ description: |-
+ Write the certificate chain at this path in the projected volume.
+
+ Most applications should use credentialBundlePath. When using keyPath
+ and certificateChainPath, your application needs to check that the key
+ and leaf certificate are consistent, because it is possible to read the
+ files mid-rotation.
+ type: string
+ credentialBundlePath:
+ description: |-
+ Write the credential bundle at this path in the projected volume.
+
+ The credential bundle is a single file that contains multiple PEM blocks.
+ The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
+ key.
+
+ The remaining blocks are CERTIFICATE blocks, containing the issued
+ certificate chain from the signer (leaf and any intermediates).
+
+ Using credentialBundlePath lets your Pod's application code make a single
+ atomic read that retrieves a consistent key and certificate chain. If you
+ project them to separate files, your application code will need to
+ additionally check that the leaf certificate was issued to the key.
+ type: string
+ keyPath:
+ description: |-
+ Write the key at this path in the projected volume.
+
+ Most applications should use credentialBundlePath. When using keyPath
+ and certificateChainPath, your application needs to check that the key
+ and leaf certificate are consistent, because it is possible to read the
+ files mid-rotation.
+ type: string
+ keyType:
+ description: |-
+ The type of keypair Kubelet will generate for the pod.
+
+ Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
+ "ECDSAP521", and "ED25519".
+ type: string
+ maxExpirationSeconds:
+ description: |-
+ maxExpirationSeconds is the maximum lifetime permitted for the
+ certificate.
+
+ Kubelet copies this value verbatim into the PodCertificateRequests it
+ generates for this projection.
+
+ If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
+ will reject values shorter than 3600 (1 hour). The maximum allowable
+ value is 7862400 (91 days).
+
+ The signer implementation is then free to issue a certificate with any
+ lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ seconds (1 hour). This constraint is enforced by kube-apiserver.
+ `kubernetes.io` signers will never issue certificates with a lifetime
+ longer than 24 hours.
+ format: int32
+ type: integer
+ signerName:
+ description: Kubelet's generated CSRs will be addressed
+ to this signer.
+ type: string
+ required:
+ - keyType
+ - signerName
+ type: object
secret:
description: secret information about the secret data to project
@@ -4537,6 +5048,15 @@
This can only be set at creation time.
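For context on the replication-slot settings around this point, a sketch of how they might be combined. The `replicationSlots.highAvailability` nesting and the `enabled`/`slotPrefix` key names are assumptions; only the prefix semantics and `synchronizeLogicalDecoding` are visible in this diff:

```yaml
# Sketch: HA slot management with logical slot synchronization, which per the
# schema needs PostgreSQL 17+, or pg_failover_slots on older versions.
spec:
  replicationSlots:
    highAvailability:          # assumed nesting
      enabled: true            # assumed key
      slotPrefix: _cnpg_       # assumed key; default value per the schema
      synchronizeLogicalDecoding: true
```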
By default set to `_cnpg_`. pattern: ^[0-9a-z_]*$ type: string + synchronizeLogicalDecoding: + description: |- + When enabled, the operator automatically manages synchronization of logical + decoding (replication) slots across high-availability clusters. + + Requires one of the following conditions: + - PostgreSQL version 17 or later + - PostgreSQL version < 17 with pg_failover_slots extension enabled + type: boolean type: object synchronizeReplicas: description: Configures the synchronization of the user defined @@ -4576,7 +5096,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -4589,6 +5109,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -4891,15 +5417,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: description: |- @@ -5149,15 +5673,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. 
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: description: |- @@ -5326,7 +5848,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -5337,7 +5858,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -5561,15 +6081,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: description: |- @@ -5629,10 +6147,6 @@ spec: - hash type: object type: array - azurePVCUpdateEnabled: - description: AzurePVCUpdateEnabled shows if the PVC online upgrade - is enabled for this cluster - type: boolean certificates: description: The configuration for the CA and related certificates, initialized with defaults. @@ -5794,14 +6308,18 @@ spec: firstRecoverabilityPoint: description: |- The first recoverability point, stored as a date in RFC3339 format. - This field is calculated from the content of FirstRecoverabilityPointByMethod + This field is calculated from the content of FirstRecoverabilityPointByMethod. + + Deprecated: the field is not set for backup plugins. type: string firstRecoverabilityPointByMethod: additionalProperties: format: date-time type: string - description: The first recoverability point, stored as a date in RFC3339 - format, per backup method type + description: |- + The first recoverability point, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. 
type: object healthyPVC: description: List of all the PVCs not dangling nor initializing @@ -5831,6 +6349,9 @@ spec: description: InstanceReportedState describes the last reported state of an instance during a reconciliation loop properties: + ip: + description: IP address of the instance + type: string isPrimary: description: indicates if an instance is the primary one type: boolean @@ -5856,7 +6377,10 @@ spec: format: int32 type: integer lastFailedBackup: - description: Stored as a date in RFC3339 format + description: |- + Last failed backup, stored as a date in RFC3339 format. + + Deprecated: the field is not set for backup plugins. type: string lastPromotionToken: description: |- @@ -5865,15 +6389,19 @@ spec: type: string lastSuccessfulBackup: description: |- - Last successful backup, stored as a date in RFC3339 format - This field is calculated from the content of LastSuccessfulBackupByMethod + Last successful backup, stored as a date in RFC3339 format. + This field is calculated from the content of LastSuccessfulBackupByMethod. + + Deprecated: the field is not set for backup plugins. type: string lastSuccessfulBackupByMethod: additionalProperties: format: date-time type: string - description: Last successful backup, stored as a date in RFC3339 format, - per backup method type + description: |- + Last successful backup, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. type: object latestGeneratedNode: description: ID of the latest generated node (used to avoid node name @@ -5921,6 +6449,20 @@ spec: description: OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. + properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object phase: description: Current phase of the cluster type: string @@ -5956,6 +6498,13 @@ spec: items: type: string type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array status: description: Status contain the status reported by the plugin through the SetStatusInCluster interface @@ -6069,6 +6618,9 @@ spec: of switching a cluster to a replica cluster. 
type: boolean type: object + systemID: + description: SystemID is the latest detected PostgreSQL SystemID + type: string tablespacesStatus: description: TablespacesStatus reports the state of the declarative tablespaces in the cluster diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index 9a09276061..81c50608b2 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.19.0 name: databases.postgresql.cnpg.io spec: group: postgresql.cnpg.io @@ -24,12 +24,12 @@ spec: - jsonPath: .spec.name name: PG Name type: string - - jsonPath: .status.ready - name: Ready + - jsonPath: .status.applied + name: Applied type: boolean - - description: Latest error message - jsonPath: .status.error - name: Error + - description: Latest reconciliation message + jsonPath: .status.message + name: Message type: string name: v1 schema: @@ -59,10 +59,22 @@ spec: More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: allowConnections: - description: True when connections to this database are allowed + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf cluster: - description: The corresponding cluster + description: The name of the PostgreSQL cluster hosting the database. properties: name: default: "" @@ -75,52 +87,365 @@ spec: type: string type: object x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf connectionLimit: description: |- - Connection limit, -1 means no limit and -2 means the - database is not valid + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. type: integer databaseReclaimPolicy: default: retain - description: The policy for end-of-life maintenance of this database + description: The policy for end-of-life maintenance of this database. enum: - delete - retain type: string encoding: - description: The encoding (cannot be changed) + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. type: string x-kubernetes-validations: - message: encoding is immutable rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". 
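Putting the spec fields above together, a minimal sketch of a Database manifest; all names are placeholders:

```yaml
# Sketch: a declaratively managed database bound to an existing cluster.
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  name: app-db
spec:
  cluster:
    name: cluster-example      # placeholder cluster name
  name: app                    # immutable; postgres/template0/template1 are reserved
  owner: app
  ensure: present              # present | absent
  connectionLimit: -1          # -1 (the default) means no limit
  databaseReclaimPolicy: retain
```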
+ enum:
+ - present
+ - absent
+ type: string
+ extensions:
+ description: The list of extensions to be managed in the database
+ items:
+ description: ExtensionSpec configures an extension in a database
+ properties:
+ ensure:
+ default: present
+ description: |-
+ Specifies whether an extension/schema should be present or absent in
+ the database. If set to `present`, the extension/schema will be
+ created if it does not exist. If set to `absent`, the
+ extension/schema will be removed if it exists.
+ enum:
+ - present
+ - absent
+ type: string
+ name:
+ description: Name of the extension/schema
+ type: string
+ schema:
+ description: |-
+ The name of the schema in which to install the extension's objects,
+ in case the extension allows its contents to be relocated. If not
+ specified (default), and the extension's control file does not
+ specify a schema either, the current default object creation schema
+ is used.
+ type: string
+ version:
+ description: |-
+ The version of the extension to install. If empty, the operator will
+ install the default version (whatever is specified in the
+ extension's control file)
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ fdws:
+ description: The list of foreign data wrappers to be managed in the
+ database
+ items:
+ description: FDWSpec configures a Foreign Data Wrapper in a database
+ properties:
+ ensure:
+ default: present
+ description: |-
+ Specifies whether an extension/schema should be present or absent in
+ the database. If set to `present`, the extension/schema will be
+ created if it does not exist. If set to `absent`, the
+ extension/schema will be removed if it exists.
+ enum:
+ - present
+ - absent
+ type: string
+ handler:
+ description: |-
+ Name of the handler function (e.g., "postgres_fdw_handler").
+ This will be empty if no handler is specified. In that case,
+ the default handler is registered when the FDW extension is created.
+ type: string
+ name:
+ description: Name of the extension/schema
+ type: string
+ options:
+ description: |-
+ Options specifies the configuration options for the FDW
+ (key is the option name, value is the option value).
+ items:
+ description: OptionSpec holds the name, value and the ensure
+ field for an option
+ properties:
+ ensure:
+ default: present
+ description: |-
+ Specifies whether an option should be present or absent in
+ the database. If set to `present`, the option will be
+ created if it does not exist. If set to `absent`, the
+ option will be removed if it exists.
+ enum:
+ - present
+ - absent
+ type: string
+ name:
+ description: Name of the option
+ type: string
+ value:
+ description: Value of the option
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ owner:
+ description: |-
+ Owner specifies the database role that will own the Foreign Data Wrapper.
+ The role must have superuser privileges in the target database.
+ type: string
+ usage:
+ description: List of roles for which `USAGE` privileges on the
+ FDW are granted or revoked.
+ items:
+ description: UsageSpec configures a usage for a foreign data
+ wrapper
+ properties:
+ name:
+ description: Name of the usage
+ type: string
+ type:
+ default: grant
+ description: The type of usage
+ enum:
+ - grant
+ - revoke
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ validator:
+ description: |-
+ Name of the validator function (e.g., "postgres_fdw_validator").
+ This will be empty if no validator is specified.
In that case, + the default validator is registered when the FDW extension is created. + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf isTemplate: - description: True when the database is a template + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf name: - description: The name inside PostgreSQL + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. type: string x-kubernetes-validations: - message: name is immutable rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' owner: - description: The owner + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. 
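Taken together, the new `extensions`, `fdws` and `schemas` lists make in-database objects declarative. A hedged sketch, with invented names, that stays within the schema shown in this patch:

    apiVersion: postgresql.cnpg.io/v1
    kind: Database
    metadata:
      name: db-app-objects
    spec:
      cluster:
        name: cluster-example
      name: app
      owner: app
      extensions:
        - name: bloom           # any installable extension
          ensure: present
      schemas:
        - name: reporting
          owner: app
      fdws:
        - name: postgres_fdw
          ensure: present
          usage:
            - name: app
              type: grant       # or "revoke"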
+ enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array tablespace: - description: The default tablespace of this database + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf required: - cluster - name - owner type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' status: description: |- Most recently observed status of the Database. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: - error: - description: Error is the reconciliation error message + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + extensions: + description: Extensions is the status of the managed extensions + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + fdws: + description: FDWs is the status of the managed FDWs + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + message: + description: Message is the reconciliation output message type: string observedGeneration: description: |- @@ -128,9 +453,28 @@ spec: desired state that was synchronized format: int64 type: integer - ready: - description: Ready is true if the database was reconciled correctly - type: boolean + schemas: + description: Schemas is the status of the managed schemas + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the 
object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array type: object required: - metadata diff --git a/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml b/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml new file mode 100644 index 0000000000..738db5fc02 --- /dev/null +++ b/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml @@ -0,0 +1,77 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: failoverquorums.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: FailoverQuorum + listKind: FailoverQuorumList + plural: failoverquorums + singular: failoverquorum + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + FailoverQuorum contains the information about the current failover + quorum status of a PG cluster. It is updated by the instance manager + of the primary node and reset to zero by the operator to trigger + an update. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + description: Most recently observed status of the failover quorum. + properties: + method: + description: Contains the latest reported Method value. + type: string + primary: + description: |- + Primary is the name of the primary instance that updated + this object the latest time. + type: string + standbyNames: + description: |- + StandbyNames is the list of potentially synchronous + instance names. + items: + type: string + type: array + standbyNumber: + description: |- + StandbyNumber is the number of synchronous standbys that transactions + need to wait for replies from. 
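Since FailoverQuorum is status-only and maintained by the primary's instance manager, a populated object might look roughly like this. The values are invented for illustration, and `method` is assumed to mirror the reported synchronous replication method:

    apiVersion: postgresql.cnpg.io/v1
    kind: FailoverQuorum
    metadata:
      name: cluster-example
    status:
      method: any
      primary: cluster-example-1
      standbyNames:
        - cluster-example-2
        - cluster-example-3
      standbyNumber: 1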
+ type: integer + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml index c961bf2eda..fdea556ad5 100644 --- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.19.0 name: imagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index 7e5ffcfd45..7b2b6d620b 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.19.0 name: poolers.postgresql.cnpg.io spec: group: postgresql.cnpg.io @@ -701,13 +701,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -1061,7 +1060,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1076,7 +1074,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1244,7 +1241,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1259,7 +1255,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
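The dropped alpha notes reflect `matchLabelKeys`/`mismatchLabelKeys` graduating in pod (anti-)affinity terms. A pooler pod template could therefore rely on them without a feature-gate caveat; the labels below are illustrative, not mandated by this patch:

    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                cnpg.io/poolerName: pooler-example
            matchLabelKeys:
              - pod-template-hash   # keep the spread per rollout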
- This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1353,8 +1348,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1425,7 +1420,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1440,7 +1434,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1608,7 +1601,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1623,7 +1615,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array @@ -1756,8 +1747,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1816,6 +1808,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. 
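The new `fileKeyRef` source (behind the EnvFiles feature gate upstream) resolves an environment variable from a key inside an env file provided through a volume. A sketch with invented volume and key names:

    env:
      - name: DB_PASSWORD
        valueFrom:
          fileKeyRef:
            volumeName: app-config   # must name a volume mount holding the env file
            path: config.env         # relative path inside that volume
            key: db_password
            optional: false          # fail pod creation if the key is missing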
+ type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1878,14 +1907,14 @@ spec: envFrom: description: |- List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple + The keys defined within a source may consist of any printable ASCII characters except '='. + When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. items: description: EnvFromSource represents the source of - a set of ConfigMaps + a set of ConfigMaps or Secrets properties: configMapRef: description: The ConfigMap to select from @@ -1906,8 +1935,9 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. + description: |- + Optional text to prepend to the name of each environment variable. + May consist of any printable ASCII characters except '='. type: string secretRef: description: The Secret to select from @@ -1958,7 +1988,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -1973,7 +2004,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -2024,8 +2055,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -2038,8 +2069,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -2071,7 +2102,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -2086,7 +2118,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. 
properties: host: @@ -2137,8 +2169,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -2151,8 +2183,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -2171,6 +2203,12 @@ spec: - port type: object type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string type: object livenessProbe: description: |- @@ -2180,7 +2218,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -2201,8 +2240,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -2221,7 +2259,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -2289,7 +2327,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -2395,7 +2433,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -2416,8 +2455,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -2436,7 +2474,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -2504,7 +2542,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -2578,7 +2616,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2592,6 +2630,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. 
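The `request` field added to resource claims narrows a container to one named request within a referenced ResourceClaim instead of the whole claim. Sketched, with hypothetical claim and request names:

    resources:
      claims:
        - name: gpu-claim        # must match an entry in spec.resourceClaims
          request: single-gpu    # omit to make the whole claim available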
type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -2627,10 +2671,10 @@ spec: restartPolicy: description: |- RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, + This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: + Additionally, setting the RestartPolicy as "Always" for the init container will + have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" @@ -2642,6 +2686,59 @@ spec: init container is started, or after any startupProbe has successfully completed. type: string + restartPolicyRules: + description: |- + Represents a list of rules to be checked to determine if the + container should be restarted on exit. The rules are evaluated in + order. Once a rule matches a container exit condition, the remaining + rules are ignored. If no rule matches the container exit condition, + the Container-level restart policy determines the whether the container + is restarted or not. Constraints on the rules: + - At most 20 rules are allowed. + - Rules can have the same action. + - Identical rules are not forbidden in validations. + When rules are specified, container MUST set RestartPolicy explicitly + even it if matches the Pod's RestartPolicy. + items: + description: ContainerRestartRule describes how a + container exit is handled. + properties: + action: + description: |- + Specifies the action taken on a container exit if the requirements + are satisfied. The only possible value is "Restart" to restart the + container. + type: string + exitCodes: + description: Represents the exit codes to check + on container exits. + properties: + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. Possible values are: + - In: the requirement is satisfied if the container exit code is in the + set of specified values. + - NotIn: the requirement is satisfied if the container exit code is + not in the set of specified values. + type: string + values: + description: |- + Specifies the set of values to check for container exit codes. + At most 255 elements are allowed. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic securityContext: description: |- SecurityContext defines the security options the container should be run with. @@ -2715,7 +2812,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. 
Note that this field cannot be set when spec.os.name is windows. @@ -2848,7 +2945,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -2869,8 +2967,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -2889,7 +2986,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -2957,7 +3054,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -3172,9 +3269,13 @@ spec: options of a pod. properties: name: - description: Required. + description: |- + Name is this DNS resolver option's name. + Required. type: string value: + description: Value is this DNS resolver option's + value. type: string type: object type: array @@ -3258,8 +3359,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -3318,6 +3420,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3380,14 +3519,14 @@ spec: envFrom: description: |- List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple + The keys defined within a source may consist of any printable ASCII characters except '='. + When a key exists in multiple sources, the value associated with the last source will take precedence. 
Values defined by an Env with a duplicate key will take precedence. Cannot be updated. items: description: EnvFromSource represents the source of - a set of ConfigMaps + a set of ConfigMaps or Secrets properties: configMapRef: description: The ConfigMap to select from @@ -3408,8 +3547,9 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. + description: |- + Optional text to prepend to the name of each environment variable. + May consist of any printable ASCII characters except '='. type: string secretRef: description: The Secret to select from @@ -3457,7 +3597,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -3472,7 +3613,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -3523,8 +3664,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -3537,8 +3678,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -3570,7 +3711,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -3585,7 +3727,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -3636,8 +3778,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -3650,8 +3792,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -3670,12 +3812,19 @@ spec: - port type: object type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. 
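`stopSignal` sits alongside `postStart`/`preStop` inside `lifecycle` and overrides the runtime's default termination signal; per the description it requires a non-empty `spec.os.name`. A minimal sketch with a placeholder image:

    apiVersion: v1
    kind: Pod
    metadata:
      name: stop-signal-demo     # illustrative
    spec:
      os:
        name: linux
      containers:
        - name: app
          image: registry.example.com/app:latest   # placeholder
          lifecycle:
            stopSignal: SIGUSR1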
+ StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string type: object livenessProbe: description: Probes are not allowed for ephemeral containers. properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -3696,8 +3845,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -3716,7 +3864,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -3784,7 +3932,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -3878,7 +4026,8 @@ spec: description: Probes are not allowed for ephemeral containers. properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -3899,8 +4048,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -3919,7 +4067,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -3987,7 +4135,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -4060,7 +4208,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -4074,6 +4222,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -4110,9 +4264,53 @@ spec: description: |- Restart policy for the container to manage the restart behavior of each container within a pod. - This may only be set for init containers. You cannot set this field on - ephemeral containers. + You cannot set this field on ephemeral containers. type: string + restartPolicyRules: + description: |- + Represents a list of rules to be checked to determine if the + container should be restarted on exit. You cannot set this field on + ephemeral containers. + items: + description: ContainerRestartRule describes how a + container exit is handled. + properties: + action: + description: |- + Specifies the action taken on a container exit if the requirements + are satisfied. The only possible value is "Restart" to restart the + container. + type: string + exitCodes: + description: Represents the exit codes to check + on container exits. 
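Assembled, a container using the restart rules described above could read as follows. The exit code and names are invented; note the schema requires an explicit container-level `restartPolicy` whenever rules are present:

    containers:
      - name: worker
        image: registry.example.com/worker:latest  # placeholder
        restartPolicy: Never        # explicit, as required when rules are set
        restartPolicyRules:
          - action: Restart         # the only supported action
            exitCodes:
              operator: In
              values: [42]          # restart only on this exit code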
+ properties: + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. Possible values are: + - In: the requirement is satisfied if the container exit code is in the + set of specified values. + - NotIn: the requirement is satisfied if the container exit code is + not in the set of specified values. + type: string + values: + description: |- + Specifies the set of values to check for container exit codes. + At most 255 elements are allowed. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic securityContext: description: |- Optional: SecurityContext defines the security options the ephemeral container should be run with. @@ -4185,7 +4383,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -4311,7 +4509,8 @@ spec: description: Probes are not allowed for ephemeral containers. properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -4332,8 +4531,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -4352,7 +4550,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -4420,7 +4618,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -4651,7 +4849,9 @@ spec: hostNetwork: description: |- Host networking requested for this pod. Use the host's network namespace. - If this option is set, the ports that will be used must be specified. + When using HostNetwork you should specify ports so the scheduler is aware. + When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, + and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false. type: boolean hostPID: @@ -4676,6 +4876,19 @@ spec: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. type: string + hostnameOverride: + description: |- + HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. + This field only specifies the pod's hostname and does not affect its DNS records. + When this field is set to a non-empty string: + - It takes precedence over the values set in `hostname` and `subdomain`. + - The Pod's hostname will be set to this value. + - `setHostnameAsFQDN` must be nil or set to false. + - `hostNetwork` must be set to false. + + This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. + Requires the HostnameOverride feature gate to be enabled. 
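`hostnameOverride` gives the pod an explicit hostname (without affecting DNS records), taking precedence over `hostname` and `subdomain`; it must be a valid RFC 1123 subdomain of at most 64 characters, with `hostNetwork: false` and `setHostnameAsFQDN` unset or false. A sketch with an invented value:

    spec:
      hostNetwork: false
      hostnameOverride: db-primary.internal   # illustrative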
+ type: string imagePullSecrets: description: |- ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. @@ -4711,7 +4924,7 @@ spec: Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of - of that value or the sum of the normal containers. Limits are applied to init containers + that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. @@ -4757,8 +4970,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4817,6 +5031,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4879,14 +5130,14 @@ spec: envFrom: description: |- List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple + The keys defined within a source may consist of any printable ASCII characters except '='. + When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. items: description: EnvFromSource represents the source of - a set of ConfigMaps + a set of ConfigMaps or Secrets properties: configMapRef: description: The ConfigMap to select from @@ -4907,8 +5158,9 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. + description: |- + Optional text to prepend to the name of each environment variable. 
+ May consist of any printable ASCII characters except '='. type: string secretRef: description: The Secret to select from @@ -4959,7 +5211,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -4974,7 +5227,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5025,8 +5278,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -5039,8 +5292,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -5072,7 +5325,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -5087,7 +5341,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5138,8 +5392,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -5152,8 +5406,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -5172,6 +5426,12 @@ spec: - port type: object type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string type: object livenessProbe: description: |- @@ -5181,7 +5441,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -5202,8 +5463,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. 
+ description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -5222,7 +5482,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5290,7 +5550,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -5396,7 +5656,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -5417,8 +5678,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -5437,7 +5697,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5505,7 +5765,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -5579,7 +5839,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -5593,6 +5853,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -5628,10 +5894,10 @@ spec: restartPolicy: description: |- RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, + This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: + Additionally, setting the RestartPolicy as "Always" for the init container will + have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" @@ -5643,6 +5909,59 @@ spec: init container is started, or after any startupProbe has successfully completed. type: string + restartPolicyRules: + description: |- + Represents a list of rules to be checked to determine if the + container should be restarted on exit. The rules are evaluated in + order. Once a rule matches a container exit condition, the remaining + rules are ignored. If no rule matches the container exit condition, + the Container-level restart policy determines the whether the container + is restarted or not. 
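The reworded init-container `restartPolicy` text describes the sidecar pattern: an init container with `restartPolicy: Always` keeps running beside the regular containers and is restarted on exit. A minimal sketch, with placeholder images:

    initContainers:
      - name: log-shipper          # illustrative sidecar
        image: registry.example.com/log-shipper:latest
        restartPolicy: Always
    containers:
      - name: postgres
        image: postgres:17         # placeholder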
Constraints on the rules: + - At most 20 rules are allowed. + - Rules can have the same action. + - Identical rules are not forbidden in validations. + When rules are specified, container MUST set RestartPolicy explicitly + even it if matches the Pod's RestartPolicy. + items: + description: ContainerRestartRule describes how a + container exit is handled. + properties: + action: + description: |- + Specifies the action taken on a container exit if the requirements + are satisfied. The only possible value is "Restart" to restart the + container. + type: string + exitCodes: + description: Represents the exit codes to check + on container exits. + properties: + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. Possible values are: + - In: the requirement is satisfied if the container exit code is in the + set of specified values. + - NotIn: the requirement is satisfied if the container exit code is + not in the set of specified values. + type: string + values: + description: |- + Specifies the set of values to check for container exit codes. + At most 255 elements are allowed. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic securityContext: description: |- SecurityContext defines the security options the container should be run with. @@ -5716,7 +6035,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -5849,7 +6168,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -5870,8 +6190,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -5890,7 +6209,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5958,7 +6277,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -6149,9 +6468,11 @@ spec: x-kubernetes-list-type: map nodeName: description: |- - NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - the scheduler simply schedules this pod onto that node, assuming that it fits resource - requirements. + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. 
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename type: string nodeSelector: additionalProperties: @@ -6174,6 +6495,7 @@ spec: - spec.hostPID - spec.hostIPC - spec.hostUsers + - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile @@ -6184,6 +6506,7 @@ spec: - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile @@ -6277,7 +6600,10 @@ spec: This field is immutable. items: description: |- - PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. properties: @@ -6286,30 +6612,32 @@ spec: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. type: string - source: - description: Source describes where to find the ResourceClaim. - properties: - resourceClaimName: - description: |- - ResourceClaimName is the name of a ResourceClaim object in the same - namespace as this pod. - type: string - resourceClaimTemplateName: - description: |- - ResourceClaimTemplateName is the name of a ResourceClaimTemplate - object in the same namespace as this pod. + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. - The template will be used to create a new ResourceClaim, which will - be bound to this pod. When this pod is deleted, the ResourceClaim - will also be deleted. The pod name and resource name, along with a - generated component, will be used to form a unique name for the - ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - This field is immutable and no changes will be made to the - corresponding ResourceClaim by the control plane after creating the - ResourceClaim. - type: string - type: object + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string required: - name type: object @@ -6317,6 +6645,74 @@ spec: x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. 
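Pod-level `resources` (alpha, behind the PodLevelResources feature gate) declares an aggregate budget shared by the pod's containers, limited to cpu, memory and hugepages- resource names. A sketch:

    spec:
      resources:
        requests:
          cpu: "2"
          memory: 4Gi
        limits:
          cpu: "4"
          memory: 8Gi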
It supports specifying Requests and Limits for + "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object restartPolicy: description: |- Restart policy for all containers within the pod. @@ -6441,6 +6837,32 @@ spec: Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. 
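The pod-level resources stanza described above accepts the same quantity syntax as container resources, but at spec level; a minimal sketch, assuming the alpha PodLevelResources feature gate is enabled.

```yaml
# Sketch: one CPU/memory budget shared by all containers in the pod
# (alpha; requires the PodLevelResources feature gate).
apiVersion: v1
kind: Pod
metadata:
  name: pod-level-resources-demo
spec:
  resources:           # only cpu, memory and hugepages-* are supported here
    requests:
      cpu: "1"
      memory: 1Gi
    limits:
      cpu: "2"
      memory: 2Gi
  containers:
  - name: app
    image: nginx:1.27
```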
Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string seLinuxOptions: description: |- The SELinux context to be applied to all containers. @@ -6493,18 +6915,28 @@ spec: type: object supplementalGroups: description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer type: array x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported @@ -6768,7 +7200,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -6779,7 +7210,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -6837,6 +7267,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. 
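Taken together, the seLinuxChangePolicy and supplementalGroupsPolicy fields documented above land in the pod security context as in this sketch; it assumes both feature gates and a container runtime that implements them.

```yaml
# Sketch: pod securityContext using the two new fields described above.
apiVersion: v1
kind: Pod
metadata:
  name: security-context-demo
spec:
  securityContext:
    seLinuxOptions:
      level: "s0:c123,c456"            # policy only applies when a label is set
    seLinuxChangePolicy: MountOption   # MountOption | Recursive
    supplementalGroups: [4000]
    supplementalGroupsPolicy: Strict   # Merge | Strict
  containers:
  - name: app
    image: busybox:1.36
    command: ["sleep", "infinity"]
```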
All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -6868,8 +7300,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk - mount on the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: @@ -6884,6 +7318,7 @@ spec: the blob storage type: string fsType: + default: ext4 description: |- fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. @@ -6897,6 +7332,7 @@ spec: set). defaults to shared' type: string readOnly: + default: false description: |- readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. @@ -6906,8 +7342,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service - mount on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. properties: readOnly: description: |- @@ -6926,8 +7364,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the - host that shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -6980,6 +7419,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -7091,7 +7532,7 @@ spec: csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external - CSI drivers (Beta feature). + CSI drivers. properties: driver: description: |- @@ -7498,15 +7939,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. 
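In PVC terms, the volumeAttributesClassName semantics described above look like the following sketch; "gold" is a hypothetical VolumeAttributesClass name.

```yaml
# Sketch: a PVC opting into a VolumeAttributesClass; unlike
# storageClassName, this field can be changed after creation.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
  volumeAttributesClassName: gold
```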
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: description: |- @@ -7562,6 +8001,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to @@ -7607,9 +8047,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached - to a kubelet's host machine. This depends on the Flocker - control service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -7625,6 +8065,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -7660,7 +8102,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -7684,12 +8126,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that + details Glusterfs topology. type: string path: description: |- @@ -7729,11 +8170,46 @@ spec: required: - path type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. 
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object iscsi: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support @@ -7760,6 +8236,7 @@ spec: description: iqn is the target iSCSI Qualified Name. type: string iscsiInterface: + default: default description: |- iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). @@ -7857,9 +8334,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host - machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -7875,8 +8352,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume - attached and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. 
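The image volume source documented above mounts an OCI image or artifact read-only into the pod; a sketch with a hypothetical artifact reference, assuming a container runtime with image-volume support.

```yaml
# Sketch: mounting an OCI artifact via the image volume source described
# above; the registry reference is hypothetical.
apiVersion: v1
kind: Pod
metadata:
  name: image-volume-demo
spec:
  containers:
  - name: app
    image: busybox:1.36
    command: ["sleep", "infinity"]
    volumeMounts:
    - name: models
      mountPath: /models          # merged manifest layers, mounted ro/noexec
  volumes:
  - name: models
    image:
      reference: registry.example.com/ai/models:v1
      pullPolicy: IfNotPresent
```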
All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -7911,10 +8391,13 @@ spec: format: int32 type: integer sources: - description: sources is the list of volume projections + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. items: - description: Projection that may be projected - along with other supported volume types + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. properties: clusterTrustBundle: description: |- @@ -8152,6 +8635,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. 
If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs + will be addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8244,8 +8832,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the - host that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -8284,7 +8873,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8299,6 +8888,7 @@ spec: More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: string keyring: + default: /etc/ceph/keyring description: |- keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. @@ -8313,6 +8903,7 @@ spec: type: array x-kubernetes-list-type: atomic pool: + default: rbd description: |- pool is the rados pool name. Default is rbd. @@ -8343,6 +8934,7 @@ spec: type: object x-kubernetes-map-type: atomic user: + default: admin description: |- user is the rados user name. Default is admin. @@ -8353,10 +8945,12 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent - volume attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: + default: xfs description: |- fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. 
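Wired into a pod, the podCertificate projection described above looks roughly like this; a sketch assuming the alpha PodCertificateRequest machinery is enabled, with a hypothetical signer name.

```yaml
# Sketch: projecting an auto-rotating key + certificate chain as a single
# PEM credential bundle; example.com/my-signer is hypothetical.
apiVersion: v1
kind: Pod
metadata:
  name: pod-certificate-demo
spec:
  containers:
  - name: app
    image: nginx:1.27
    volumeMounts:
    - name: tls
      mountPath: /var/run/tls
      readOnly: true
  volumes:
  - name: tls
    projected:
      sources:
      - podCertificate:
          signerName: example.com/my-signer
          keyType: ECDSAP256                    # one of the listed key types
          credentialBundlePath: credbundle.pem  # single atomic read of key + chain
          maxExpirationSeconds: 86400
```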
@@ -8397,6 +8991,7 @@ spec: communication with Gateway, default false type: boolean storageMode: + default: ThinProvisioned description: |- storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. @@ -8485,8 +9080,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume - attached and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -8531,8 +9127,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume - attached and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -8573,6 +9171,7 @@ spec: enum: - rw - ro + - r type: string required: - cluster diff --git a/config/crd/bases/postgresql.cnpg.io_publications.yaml b/config/crd/bases/postgresql.cnpg.io_publications.yaml new file mode 100644 index 0000000000..7bb6d3c11a --- /dev/null +++ b/config/crd/bases/postgresql.cnpg.io_publications.yaml @@ -0,0 +1,195 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml index 15fb35c0ba..aa6bc55005 100644 --- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.19.0 name: scheduledbackups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml new file mode 100644 index 0000000000..d5d0b7872d --- /dev/null +++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml @@ -0,0 +1,149 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
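Stepping back to the Publication CRD that just closed above, a conforming resource would look like the following sketch (cluster, database, and table names are illustrative).

```yaml
# Sketch: publish two columns of public.orders from cluster-example.
apiVersion: postgresql.cnpg.io/v1
kind: Publication
metadata:
  name: pub-sample
spec:
  cluster:
    name: cluster-example        # the "publisher"
  dbname: app
  name: pub_sample               # name inside PostgreSQL (immutable)
  publicationReclaimPolicy: delete
  target:                        # allTables and objects are mutually exclusive
    objects:
    - table:
        schema: public
        name: orders
        columns: ["id", "total"]
```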
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters included in the `WITH` clause of the PostgreSQL + `CREATE SUBSCRIPTION` command. Most parameters cannot be changed + after the subscription is created and will be ignored if modified + later, except for a limited set documented at: + https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. 
+ type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 5e4757d42c..ec827bc3a4 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,6 +11,10 @@ resources: - bases/postgresql.cnpg.io_imagecatalogs.yaml - bases/postgresql.cnpg.io_clusterimagecatalogs.yaml - bases/postgresql.cnpg.io_databases.yaml +- bases/postgresql.cnpg.io_publications.yaml +- bases/postgresql.cnpg.io_subscriptions.yaml +- bases/postgresql.cnpg.io_failoverquorums.yaml + # +kubebuilder:scaffold:crdkustomizeresource patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. @@ -52,6 +56,17 @@ patches: # kind: CustomResourceDefinition # name: poolers.postgresql.cnpg.io #- path: patches/cainjection_in_databases.yaml +# target: +# kind: CustomResourceDefinition +# name: databases.postgresql.cnpg.io +#- path: patches/cainjection_in_publications.yaml +# target: +# kind: CustomResourceDefinition +# name: publications.postgresql.cnpg.io +#- path: patches/cainjection_in_subscriptions.yaml +# target: +# kind: CustomResourceDefinition +# name: subscriptions.postgresql.cnpg.io # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 9a299052ba..079ffd5c70 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -22,7 +22,7 @@ resources: - ../webhook # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. #- ../certmanager -# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. #- ../prometheus patches: @@ -40,11 +40,6 @@ patches: name: controller-manager version: v1 -# Protect the /metrics endpoint by putting it behind auth. -# If you want your controller-manager to expose the /metrics -# endpoint w/o any authn/z, please comment the following line. -#- manager_auth_proxy_patch.yaml - # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
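Returning to the Subscription CRD defined above, a matching resource would be along these lines; the names are illustrative and pair with the Publication sketch shown earlier.

```yaml
# Sketch: subscribe cluster-replica to the pub_sample publication defined
# on the external cluster-example.
apiVersion: postgresql.cnpg.io/v1
kind: Subscription
metadata:
  name: sub-sample
spec:
  cluster:
    name: cluster-replica        # the "subscriber"
  dbname: app
  name: sub_sample               # name inside PostgreSQL (immutable)
  externalClusterName: cluster-example
  publicationName: pub_sample
  parameters:
    copy_data: "true"            # WITH (...) options; values are strings
```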
# 'CERTMANAGER' needs to be enabled to use ca injection diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml deleted file mode 100644 index 43095c5fb2..0000000000 --- a/config/default/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the controller manager, -# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - args: - - "--metrics-bind-address=127.0.0.1:8080" - - "--leader-elect" - - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=10" - ports: - - containerPort: 8443 - name: https diff --git a/config/manager/default-monitoring.yaml b/config/manager/default-monitoring.yaml index 0078087ee2..ab4878bdb6 100644 --- a/config/manager/default-monitoring.yaml +++ b/config/manager/default-monitoring.yaml @@ -454,3 +454,35 @@ data: - setting: usage: "GAUGE" description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' diff --git a/config/manager/env_override.yaml b/config/manager/env_override.yaml index 867e3b7f83..633bff2e15 100644 --- a/config/manager/env_override.yaml +++ b/config/manager/env_override.yaml @@ -20,3 +20,7 @@ spec: - --webhook-port=9443 - --log-level=debug - --pprof-server=true + ports: + - containerPort: 6060 + name: pprof + protocol: TCP diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f0164e0937..4ac078c461 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -38,10 +38,12 @@ spec: args: - controller - --leader-elect + - --max-concurrent-reconciles=10 - --config-map-name=$(OPERATOR_DEPLOYMENT_NAME)-config - --secret-name=$(OPERATOR_DEPLOYMENT_NAME)-config - --webhook-port=9443 image: controller:latest + imagePullPolicy: Always name: manager ports: - containerPort: 8080 @@ -68,6 +70,13 @@ spec: port: 9443 scheme: HTTPS path: /readyz + startupProbe: + failureThreshold: 6 + periodSeconds: 5 + httpGet: + port: 9443 + scheme: HTTPS + path: /readyz readinessProbe: httpGet: port: 9443 diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index ad6f561a6d..00c0a11529 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -12,7 +12,6 @@ metadata: containerImage: $(OPERATOR_IMAGE_NAME) repository: https://github.com/cloudnative-pg/cloudnative-pg support: Community - olm.skipRange: '>= 1.18.0 < ${VERSION}' 
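As an aside on the default-monitoring addition above: following the exporter's usual `cnpg_<query>_<column>` naming convention (an assumption here, not restated in this diff), the new `pg_extensions` query should surface one gauge per installed extension per matched database, e.g. a series such as `cnpg_pg_extensions_update_available{datname="app",extname="pg_stat_statements",default_version="1.11",installed_version="1.10"} 1` whenever a newer default version is available.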
features.operators.openshift.io/disconnected: "true" features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" @@ -29,50 +28,69 @@ spec: description: | Main features: - * Direct integration with Kubernetes API server for High Availability, - without requiring an external tool - * Self-Healing capability, through: - * failover of the primary instance by promoting the most aligned replica - * automated recreation of a replica - * Planned switchover of the primary instance by promoting a selected replica - * Scale up/down capabilities - * Definition of an arbitrary number of instances (minimum 1 - one primary server) - * Definition of the *read-write* service, to connect your applications to the only primary server of the cluster - * Definition of the *read-only* service, to connect your applications to any of the instances for reading workloads - * Declarative management of PostgreSQL configuration - * Declarative management of Postgres roles, users and groups - * Support for Local Persistent Volumes with PVC templates - * Reuse of Persistent Volumes storage in Pods - * Separate volume for WAL files - * Rolling updates for PostgreSQL minor versions - * In-place or rolling updates for operator upgrades - * TLS connections and client certificate authentication - * Support for custom TLS certificates (including integration with cert-manager) - * Continuous WAL archiving to an object store (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) - * Backups on volume snapshots (where supported by the underlying storage classes) - * Backups on object stores (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) - * Full recovery and Point-In-Time recovery from an existing backup on volume snapshots or object stores - * Offline import of existing PostgreSQL databases, including major upgrades of PostgreSQL - * Fencing of an entire PostgreSQL cluster, or a subset of the instances in a declarative way - * Hibernation of a PostgreSQL cluster in a declarative way - * Support for Synchronous Replicas - * Support for HA physical replication slots at cluster level - * Backup from a standby - * Backup retention policies (based on recovery window, only on object stores) - * Parallel WAL archiving and restore to allow the database to keep up with WAL - generation on high write systems - * Support tagging backup files uploaded to an object store to enable optional - retention management at the object store layer Replica clusters for - * PostgreSQL deployments across multiple Kubernetes - clusters, enabling private, public, hybrid, and multi-cloud architectures - * Connection pooling with PgBouncer - * Support for node affinity via `nodeSelector` - * Native customizable exporter of user defined metrics for Prometheus through the `metrics` port (9187) - * Standard output logging of PostgreSQL error messages in JSON format - * Automatically set `readOnlyRootFilesystem` security context for pods - * `cnpg` plugin for `kubectl` - * Simple bind and search+bind LDAP client authentication - * Multi-arch format container images + - Direct integration with the Kubernetes API server for High Availability, + eliminating the need for external tools. + - Self-healing capabilities, including: + - Automated failover by promoting the most aligned replica. + - Automatic recreation of failed replicas. + - Planned switchover of the primary instance by promoting a selected replica. 
+ - Declarative management of key PostgreSQL configurations, including: + - PostgreSQL settings. + - Roles, users, and groups. + - Databases, extensions, and schemas. + - Tablespaces (including temporary tablespaces). + - Flexible instance definition, supporting any number of instances (minimum 1 + primary server). + - Scale-up/down capabilities to dynamically adjust cluster size. + - Read-Write and Read-Only Services, ensuring applications connect correctly: + - *Read-Write Service*: Routes connections to the primary server. + - *Read-Only Service*: Distributes connections among replicas for read workloads. + - Support for quorum-based and priority-based PostgreSQL Synchronous + Replication. + - Replica clusters enabling PostgreSQL distributed topologies across multiple + Kubernetes clusters (private, public, hybrid, and multi-cloud). + - Delayed Replica clusters for point-in-time access to historical data. + - Persistent volume management, including: + - Support for Local Persistent Volumes with PVC templates. + - Reuse of Persistent Volumes storage in Pods. + - Separate volumes for WAL files and tablespaces. + - Backup and recovery options, including: + - Integration with the [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) + for continuous online backup via WAL archiving to AWS S3, S3-compatible + services, Azure Blob Storage, and Google Cloud Storage, with support for + retention policies based on a configurable recovery window. + - Backups using volume snapshots (where supported by storage classes). + - Full and Point-In-Time recovery from volume snapshots or object stores (via Barman Cloud plugin). + - Backup from standby replicas to reduce primary workload impact. + - Offline and online import of PostgreSQL databases, including major upgrades: + - *Offline Import*: Direct restore from existing databases. + - *Online Import*: PostgreSQL native logical replication via the `Subscription` resource. + - Offline In-Place Major Upgrades of PostgreSQL + - High Availability physical replication slots, including synchronization of + user-defined replication slots. + - Parallel WAL archiving and restore, ensuring high-performance data + synchronization in high-write environments. + - TLS support, including: + - Secure connections and client certificate authentication. + - Custom TLS certificates (integrated with `cert-manager`). + - Startup and readiness probes, including replica probes based on desired lag + from the primary. + - Declarative rolling updates for: + - PostgreSQL minor versions. + - Operator upgrades (in-place or rolling updates). + - Standard output logging of PostgreSQL error messages in JSON format for + easier integration with log aggregation tools. + - Prometheus-compatible metrics exporter (`metrics` port 9187) for custom + monitoring. + - `cnpg` plugin for `kubectl` to simplify cluster operations. + - Cluster hibernation for resource efficiency in inactive states. + - Fencing of PostgreSQL clusters (full cluster or subset) to isolate instances + when needed. + - Connection pooling with PgBouncer for improved database efficiency. + - OLM (Operator Lifecycle Manager) installation support for streamlined + deployments. + - Multi-arch container images, including Software Bill of Materials (SBOM) and + provenance attestations for security compliance. install: spec: @@ -107,20 +125,22 @@ spec: maintainers: - email: jonathan.gonzalez@enterprisedb.com name: Jonathan Gonzalez V. 
- - email: john.long@enterprisedb.com - name: John Long + - email: jonathan.battiato@enterprisedb.com + name: Jonathan Battiato + - email: niccolo.fei@enterprisedb.com + name: Niccolo Fei - email: gabriele.bartolini@enterprisedb.com name: Gabriele Bartolini maturity: stable provider: name: The CloudNativePG Contributors - replaces: 'cloudnative-pg.${REPLACE_VERSION}' icon: - base64data: <?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.6.1, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 viewBox="0 0 420 500" style="enable-background:new 0 0 420 500;" xml:space="preserve">
<style type="text/css">
	.st0{fill:#121646;}
	.st1{fill:url(#SVGID_1_);}
	.st2{fill:url(#SVGID_00000155867176934933239400000010055260633525782145_);}
</style>
<g id="Layer_2">
</g>
<g id="Layer_1">
	<g>
		<g>
			<g>
				<path class="st0" d="M359,369.61c-3.49-8.49-6.07-17.37-8.68-26.19c-2.98-10.05-5.44-20.24-8.39-30.3
					c-0.87-2.96-2.34-5.84-4.04-8.43c-1.69-2.58-3.44-2.19-4.33,0.73c-2.4,7.81-4.44,15.75-7.12,23.46
					c-4.83,13.92-11.42,26.87-20.43,38.72c-4.5,5.91-9.53,11.39-14.73,16.68c-2.6,2.65-5.26,5.25-7.91,7.84
					c-2.33,2.27-4.76,4.65-3.12,8.1c1.27,2.67,4.11,2.93,6.73,2.91c0.38,0,0.75-0.01,1.12-0.01c13.44-0.22,26.88,0.15,40.32,0.3
					c9.38,0.11,18.77,0.49,28.15,0.28c3.96-0.09,8.3,0.04,11.52-3.2c3.62-3.64,4.2-6.42,1.6-10.87
					C365.86,383.09,361.86,376.57,359,369.61z"/>
				<path class="st0" d="M385.31,258.33c-1.75-0.43-3.46-0.9-5.12-1.44c-17.21-5.5-29.1-16.67-42.19-29.26
					c-2-1.93-2.65-4.13-2.51-6.89c0.74-12.86-0.81-25.56-3.07-38.17c-3.19-17.59-8.46-34.6-15.64-51.01
					c-1.62-3.68-3.77-6.93-4.89-10.84c-0.11-0.43-0.2-0.92,0.09-1.26c0.31-0.38,0.94-0.29,1.41-0.11
					c4.17,1.71,10.82,20.58,16.18,15.75c1.14-1.03,1.57-2.6,1.86-4.11c4.56-22.48-1.19-57.13-21.27-71.04
					c-9.22-6.4-19.34-12.61-30.41-15.1c-5.66-1.26-11.8-1.19-17.01,1.37c-2.58,1.26-4.91,3.12-7.72,3.79
					c-5,1.23-9.99-1.57-14.92-3.07c-5.99-1.8-12.52-1.71-18.45,0.29c-4.76,1.59-9.51,4.42-14.45,3.5
					c-6.84-1.28-11.78-9.18-18.87-11.6c-8.5-2.92-17.95-2.92-26.66-0.96c-25,5.61-48.47,23.16-65.95,41.22
					c-19.19,19.81-26.82,46.02-9.85,69.94c6.06,8.55,14.97,15.21,25.13,17.84c1.97,0.52,4.06,0.92,5.7,2.13
					c6.46,4.8-7.18,17.66-10.1,21.83c-6.28,9.04-12.14,18.42-16.49,28.57c-8.08,18.78-11.6,37.68-14.43,57.78
					c-4.89,34.78-11.44,70.53-27.71,101.94c-2.63,5.05-5.9,10.08-7.97,15.37c-0.49,1.26-0.65,2.74,0,3.9
					c0.38,0.67,1.01,1.19,1.68,1.55c3.64,2.02,9.16,1.57,13.17,1.59c12.12,0.04,24.24,0.09,36.37,0.11
					c6.17,0.02,12.34,0.04,18.51,0.04c4.44,0,10.97,1.12,15.01-1.01c2.6-1.37,4.04-4.29,4.76-7.16c1.05-4.22,1.73-8.55,2.85-12.77
					c3.46-12.9,8.37-25.18,14.68-36.98c10.01-18.74,20.46-39.92,1.88-57.02c-4.89-4.49-10.52-8.06-15.78-12.09
					c-1.01-0.79-2-2.38-0.94-3.1c0.4-0.29,0.97-0.22,1.48-0.11c20.26,4.24,39,26.01,37.74,46.85c-0.67,10.88-5.05,20.64-8.37,30.74
					c-3.37,10.26-7.32,20.4-8.26,31.3c-0.38,4.31-0.83,8.66-0.88,12.99c-0.07,5.61,2.04,8.5,8.42,8.53
					c23.32,0.04,46.63,0.4,69.95,0.63c5.92,0.07,18.09,2.02,20.46-5.63c0.88-2.83-0.25-5.86-1.71-8.42
					c-1.46-2.56-3.32-4.96-4.11-7.81c-1.44-5.09,0.81-10.46,3.19-15.19c2.76-5.48,6.19-10.61,8.93-16.09
					c3.21-5.79,6.98-11.18,9.87-17.17c3.05-6.26,5.52-12.79,7.56-19.43c2.76-8.91,4.76-18.04,6.33-27.22
					c0.34-1.99,0.55-3.99,0.79-5.99c0.12-1.01,0.24-2.01,0.39-3.02c0.16-1.13,0.41-2.46,1.87-2.35c8.7,0.61-4.36,50.4-5.5,53.44
					c-2.67,7.14-5.88,14.07-9.65,20.71c-4.73,8.33-13.44,15.89-11.31,26.41c0.76,3.73,1.66,9.07,5.03,11.42
					c4.62,3.25,10.57-2.15,13.85-5.09c5.16-4.65,9.31-10.19,13.67-15.57c11.98-14.81,21.21-31.66,27.92-49.46
					c7.29-19.43,10.55-38.75,13.1-59.22c0.16-1.17,0.4-2.49,1.39-3.12c1.12-0.72,2.58-0.18,3.79,0.36
					c14.81,6.55,30.81,11.85,46.92,10.3c1.97-0.2,4.44-1.03,4.49-3.01C389.61,259.72,387.21,258.78,385.31,258.33z M306.52,248.21
					c-0.16,0.56-0.58,1.05-1.08,1.37c-0.94,0.58-2.2,0.74-3.28,0.85c-7.18,0.74-14.25-1.46-20.71-4.38
					c-1.03-0.45-2.06-0.92-3.1-1.44c-9.85-4.89-19.01-11.87-26.57-19.86c-6.42-6.78-11.49-14.81-14.97-23.47
					c-0.74-1.84-1.62-3.81-2.04-5.77c-0.36-1.66-0.13-3.61,1.14-4.76c0.81-0.72,1.91-0.99,2.96-1.21c3.52-0.7,7.27-1.08,10.84-0.76
					c1.46,0.09,2.98,0.47,4.02,1.48c0.92,0.87,1.37,2.11,1.77,3.32c1.75,5.25,3.21,10.66,5.7,15.64c2.51,5.05,5.83,9.78,9.29,14.23
					c1.26,1.62,2.56,3.19,3.9,4.73c6.66,7.65,15.42,14.5,25.65,16.45c0.4,0.09,0.81,0.16,1.23,0.22c1.41,0.2,2.9,0.34,4.11,1.12
					c0.58,0.38,1.1,0.99,1.17,1.68C306.56,247.85,306.56,248.03,306.52,248.21z"/>
			</g>
		</g>
		<g>
			<g>
				<radialGradient id="SVGID_1_" cx="-302.7225" cy="-304.0343" r="1177.4547" gradientUnits="userSpaceOnUse">
					<stop  offset="0" style="stop-color:#732DD9"/>
					<stop  offset="0.1185" style="stop-color:#6A2BCB"/>
					<stop  offset="0.3434" style="stop-color:#5125A5"/>
					<stop  offset="0.6486" style="stop-color:#291C69"/>
					<stop  offset="0.8139" style="stop-color:#121646"/>
					<stop  offset="1" style="stop-color:#121646"/>
				</radialGradient>
				<path class="st1" d="M359,369.61c-3.49-8.49-6.07-17.37-8.68-26.19c-2.98-10.05-5.44-20.24-8.39-30.3
					c-0.87-2.96-2.34-5.84-4.04-8.43c-1.69-2.58-3.44-2.19-4.33,0.73c-2.4,7.81-4.44,15.75-7.12,23.46
					c-4.83,13.92-11.42,26.87-20.43,38.72c-4.5,5.91-9.53,11.39-14.73,16.68c-2.6,2.65-5.26,5.25-7.91,7.84
					c-2.33,2.27-4.76,4.65-3.12,8.1c1.27,2.67,4.11,2.93,6.73,2.91c0.38,0,0.75-0.01,1.12-0.01c13.44-0.22,26.88,0.15,40.32,0.3
					c9.38,0.11,18.77,0.49,28.15,0.28c3.96-0.09,8.3,0.04,11.52-3.2c3.62-3.64,4.2-6.42,1.6-10.87
					C365.86,383.09,361.86,376.57,359,369.61z"/>
				
					<radialGradient id="SVGID_00000142875328974202796550000005776013639545705360_" cx="-302.7225" cy="-304.0343" r="1177.4547" gradientUnits="userSpaceOnUse">
					<stop  offset="0" style="stop-color:#732DD9"/>
					<stop  offset="0.1185" style="stop-color:#6A2BCB"/>
					<stop  offset="0.3434" style="stop-color:#5125A5"/>
					<stop  offset="0.6486" style="stop-color:#291C69"/>
					<stop  offset="0.8139" style="stop-color:#121646"/>
					<stop  offset="1" style="stop-color:#121646"/>
				</radialGradient>
				<path style="fill:url(#SVGID_00000142875328974202796550000005776013639545705360_);" d="M385.31,258.33
					c-1.75-0.43-3.46-0.9-5.12-1.44c-17.21-5.5-29.1-16.67-42.19-29.26c-2-1.93-2.65-4.13-2.51-6.89
					c0.74-12.86-0.81-25.56-3.07-38.17c-3.19-17.59-8.46-34.6-15.64-51.01c-1.62-3.68-3.77-6.93-4.89-10.84
					c-0.11-0.43-0.2-0.92,0.09-1.26c0.31-0.38,0.94-0.29,1.41-0.11c4.17,1.71,10.82,20.58,16.18,15.75c1.14-1.03,1.57-2.6,1.86-4.11
					c4.56-22.48-1.19-57.13-21.27-71.04c-9.22-6.4-19.34-12.61-30.41-15.1c-5.66-1.26-11.8-1.19-17.01,1.37
					c-2.58,1.26-4.91,3.12-7.72,3.79c-5,1.23-9.99-1.57-14.92-3.07c-5.99-1.8-12.52-1.71-18.45,0.29c-4.76,1.59-9.51,4.42-14.45,3.5
					c-6.84-1.28-11.78-9.18-18.87-11.6c-8.5-2.92-17.95-2.92-26.66-0.96c-25,5.61-48.47,23.16-65.95,41.22
					c-19.19,19.81-26.82,46.02-9.85,69.94c6.06,8.55,14.97,15.21,25.13,17.84c1.97,0.52,4.06,0.92,5.7,2.13
					c6.46,4.8-7.18,17.66-10.1,21.83c-6.28,9.04-12.14,18.42-16.49,28.57c-8.08,18.78-11.6,37.68-14.43,57.78
					c-4.89,34.78-11.44,70.53-27.71,101.94c-2.63,5.05-5.9,10.08-7.97,15.37c-0.49,1.26-0.65,2.74,0,3.9
					c0.38,0.67,1.01,1.19,1.68,1.55c3.64,2.02,9.16,1.57,13.17,1.59c12.12,0.04,24.24,0.09,36.37,0.11
					c6.17,0.02,12.34,0.04,18.51,0.04c4.44,0,10.97,1.12,15.01-1.01c2.6-1.37,4.04-4.29,4.76-7.16c1.05-4.22,1.73-8.55,2.85-12.77
					c3.46-12.9,8.37-25.18,14.68-36.98c10.01-18.74,20.46-39.92,1.88-57.02c-4.89-4.49-10.52-8.06-15.78-12.09
					c-1.01-0.79-2-2.38-0.94-3.1c0.4-0.29,0.97-0.22,1.48-0.11c20.26,4.24,39,26.01,37.74,46.85c-0.67,10.88-5.05,20.64-8.37,30.74
					c-3.37,10.26-7.32,20.4-8.26,31.3c-0.38,4.31-0.83,8.66-0.88,12.99c-0.07,5.61,2.04,8.5,8.42,8.53
					c23.32,0.04,46.63,0.4,69.95,0.63c5.92,0.07,18.09,2.02,20.46-5.63c0.88-2.83-0.25-5.86-1.71-8.42
					c-1.46-2.56-3.32-4.96-4.11-7.81c-1.44-5.09,0.81-10.46,3.19-15.19c2.76-5.48,6.19-10.61,8.93-16.09
					c3.21-5.79,6.98-11.18,9.87-17.17c3.05-6.26,5.52-12.79,7.56-19.43c2.76-8.91,4.76-18.04,6.33-27.22
					c0.34-1.99,0.55-3.99,0.79-5.99c0.12-1.01,0.24-2.01,0.39-3.02c0.16-1.13,0.41-2.46,1.87-2.35c8.7,0.61-4.36,50.4-5.5,53.44
					c-2.67,7.14-5.88,14.07-9.65,20.71c-4.73,8.33-13.44,15.89-11.31,26.41c0.76,3.73,1.66,9.07,5.03,11.42
					c4.62,3.25,10.57-2.15,13.85-5.09c5.16-4.65,9.31-10.19,13.67-15.57c11.98-14.81,21.21-31.66,27.92-49.46
					c7.29-19.43,10.55-38.75,13.1-59.22c0.16-1.17,0.4-2.49,1.39-3.12c1.12-0.72,2.58-0.18,3.79,0.36
					c14.81,6.55,30.81,11.85,46.92,10.3c1.97-0.2,4.44-1.03,4.49-3.01C389.61,259.72,387.21,258.78,385.31,258.33z M306.52,248.21
					c-0.16,0.56-0.58,1.05-1.08,1.37c-0.94,0.58-2.2,0.74-3.28,0.85c-7.18,0.74-14.25-1.46-20.71-4.38
					c-1.03-0.45-2.06-0.92-3.1-1.44c-9.85-4.89-19.01-11.87-26.57-19.86c-6.42-6.78-11.49-14.81-14.97-23.47
					c-0.74-1.84-1.62-3.81-2.04-5.77c-0.36-1.66-0.13-3.61,1.14-4.76c0.81-0.72,1.91-0.99,2.96-1.21c3.52-0.7,7.27-1.08,10.84-0.76
					c1.46,0.09,2.98,0.47,4.02,1.48c0.92,0.87,1.37,2.11,1.77,3.32c1.75,5.25,3.21,10.66,5.7,15.64c2.51,5.05,5.83,9.78,9.29,14.23
					c1.26,1.62,2.56,3.19,3.9,4.73c6.66,7.65,15.42,14.5,25.65,16.45c0.4,0.09,0.81,0.16,1.23,0.22c1.41,0.2,2.9,0.34,4.11,1.12
					c0.58,0.38,1.1,0.99,1.17,1.68C306.56,247.85,306.56,248.03,306.52,248.21z"/>
			</g>
		</g>
	</g>
	<g>
		<g>
			<g>
				<path class="st0" d="M49.62,435.45c1.26-1.81,3.15-2.72,5.67-2.72c2.33,0,4.06,0.54,5.19,1.6c1.14,1.07,1.8,2.79,1.98,5.17h7.58
					c-0.4-4.11-1.9-7.3-4.5-9.58c-2.6-2.27-6.02-3.41-10.26-3.41c-3.07,0-5.76,0.72-8.08,2.16c-2.32,1.44-4.1,3.51-5.35,6.21
					c-1.25,2.7-1.87,5.81-1.87,9.35v2.25c0,5.54,1.35,9.9,4.06,13.08c2.7,3.18,6.42,4.76,11.13,4.76c4.31,0,7.79-1.14,10.43-3.41
					c2.64-2.27,4.1-5.39,4.39-9.35h-7.58c-0.17,2.24-0.83,3.9-1.98,4.97c-1.15,1.07-2.9,1.61-5.24,1.61c-2.6,0-4.49-0.92-5.7-2.76
					c-1.2-1.84-1.81-4.74-1.81-8.72v-2.78C47.72,440.07,48.36,437.26,49.62,435.45z"/>
				<rect x="74.88" y="425" class="st0" width="7.33" height="38.82"/>
				<path class="st0" d="M109.04,439.36c-2.33-2.26-5.35-3.39-9.04-3.39c-2.6,0-4.87,0.58-6.84,1.74c-1.96,1.16-3.47,2.81-4.51,4.93
					c-1.04,2.12-1.57,4.54-1.57,7.25v0.33c0,4.35,1.18,7.79,3.53,10.31c2.35,2.53,5.5,3.79,9.44,3.79c3.94,0,7.09-1.27,9.43-3.8
					c2.34-2.54,3.51-5.9,3.51-10.1l-0.05-1.87C112.68,444.68,111.38,441.62,109.04,439.36z M104.2,456.4
					c-0.99,1.36-2.38,2.04-4.15,2.04c-1.82,0-3.22-0.69-4.2-2.06c-0.98-1.38-1.47-3.36-1.47-5.96c0-2.92,0.49-5.08,1.47-6.47
					c0.98-1.39,2.36-2.09,4.15-2.09c1.8,0,3.2,0.7,4.2,2.1c0.99,1.4,1.49,3.38,1.49,5.92C105.69,452.87,105.2,455.04,104.2,456.4z"
					/>
				<path class="st0" d="M133.62,455.86c-0.93,1.72-2.63,2.58-5.11,2.58c-2.6,0-3.89-1.43-3.89-4.28v-17.69h-7.3v17.92
					c0.03,3.25,0.82,5.72,2.36,7.41c1.54,1.68,3.78,2.53,6.71,2.53c3.18,0,5.68-1.1,7.48-3.29l0.2,2.78h6.88v-27.35h-7.33V455.86z"
					/>
				<path class="st0" d="M162.64,438.95c-1.74-1.99-3.93-2.98-6.6-2.98c-3.32,0-5.94,1.26-7.85,3.79
					c-1.91,2.53-2.87,5.92-2.87,10.19c0,4.52,0.97,8.04,2.92,10.58c1.95,2.54,4.53,3.8,7.75,3.8c2.86,0,5.21-1.14,7.03-3.41
					l0.35,2.91h6.6V425h-7.33V438.95z M162.64,455.58c-0.93,1.91-2.52,2.86-4.78,2.86c-1.7,0-3-0.68-3.89-2.05
					c-0.89-1.37-1.34-3.33-1.34-5.9c0-5.72,1.76-8.58,5.28-8.58c2.24,0,3.82,0.95,4.73,2.86V455.58z"/>
				<polygon class="st0" points="198.48,451.29 183.7,427.02 176.11,427.02 176.11,463.82 183.7,463.82 183.7,439.61 198.46,463.82 
					206.04,463.82 206.04,427.02 198.48,427.02 				"/>
				<path class="st0" d="M234.32,445.65c0-3.07-0.99-5.45-2.97-7.14c-1.98-1.69-4.66-2.54-8.03-2.54c-2.22,0-4.23,0.38-6.02,1.15
					c-1.79,0.77-3.17,1.82-4.15,3.17c-0.98,1.35-1.47,2.81-1.47,4.4h7.3c0-1.03,0.35-1.84,1.05-2.43c0.7-0.59,1.66-0.88,2.89-0.88
					c1.4,0,2.42,0.38,3.07,1.14c0.65,0.76,0.97,1.77,0.97,3.03v1.57h-3.36c-4.06,0.02-7.17,0.8-9.31,2.35
					c-2.15,1.55-3.22,3.77-3.22,6.67c0,2.36,0.88,4.31,2.64,5.86c1.76,1.55,3.98,2.33,6.66,2.33c2.83,0,5.13-0.99,6.9-2.96
					c0.15,0.98,0.4,1.79,0.73,2.45h7.38v-0.43c-0.71-1.33-1.07-3.29-1.09-5.89V445.65z M226.99,456.09
					c-0.44,0.81-1.11,1.45-2.02,1.93c-0.91,0.48-1.9,0.72-2.98,0.72c-1.11,0-1.99-0.29-2.63-0.88c-0.64-0.59-0.96-1.34-0.96-2.25
					l0.03-0.43c0.24-2.53,2.18-3.79,5.84-3.79h2.73V456.09z"/>
				<path class="st0" d="M249.26,457.66c-0.39-0.44-0.58-1.16-0.58-2.17v-13.65h4.68v-5.36h-4.68v-6.72h-7.3v6.72h-3.99v5.36h3.99
					v14.74c0.1,5.17,2.71,7.76,7.84,7.76c1.52,0,2.99-0.22,4.42-0.66v-5.54c-0.62,0.12-1.34,0.18-2.15,0.18
					C250.39,458.31,249.65,458.1,249.26,457.66z"/>
				<path class="st0" d="M261.47,425.63c-1.26,0-2.26,0.35-3,1.06c-0.73,0.71-1.1,1.61-1.1,2.7c0,1.11,0.37,2.02,1.12,2.73
					c0.75,0.71,1.74,1.06,2.97,1.06c1.23,0,2.22-0.35,2.97-1.06c0.75-0.71,1.12-1.62,1.12-2.73c0-1.1-0.37-2-1.11-2.7
					C263.71,425.99,262.72,425.63,261.47,425.63z"/>
				<rect x="257.81" y="436.48" class="st0" width="7.33" height="27.35"/>
				<polygon class="st0" points="281.39,454.9 276.31,436.48 268.67,436.48 277.9,463.82 284.88,463.82 294.1,436.48 286.47,436.48 
									"/>
				<path class="st0" d="M320.82,449.54c0-4.33-1.07-7.68-3.22-10.03c-2.15-2.36-5.09-3.54-8.83-3.54c-2.49,0-4.71,0.59-6.66,1.76
					c-1.95,1.17-3.45,2.84-4.51,5c-1.06,2.17-1.59,4.62-1.59,7.37v0.71c0,4.09,1.26,7.37,3.78,9.83c2.52,2.46,5.78,3.69,9.79,3.69
					c2.27,0,4.35-0.43,6.22-1.3c1.87-0.87,3.35-2.08,4.45-3.63l-3.59-4.02c-1.58,2.04-3.78,3.06-6.6,3.06
					c-1.82,0-3.32-0.54-4.51-1.62c-1.19-1.08-1.9-2.51-2.14-4.3h17.42V449.54z M313.64,447.55h-10.16c0.25-1.83,0.83-3.23,1.73-4.21
					c0.9-0.98,2.08-1.47,3.53-1.47c1.55,0,2.75,0.44,3.59,1.33c0.84,0.89,1.28,2.15,1.31,3.77V447.55z"/>
			</g>
			<g>
				<path class="st0" d="M49.62,435.45c1.26-1.81,3.15-2.72,5.67-2.72c2.33,0,4.06,0.54,5.19,1.6c1.14,1.07,1.8,2.79,1.98,5.17h7.58
					c-0.4-4.11-1.9-7.3-4.5-9.58c-2.6-2.27-6.02-3.41-10.26-3.41c-3.07,0-5.76,0.72-8.08,2.16c-2.32,1.44-4.1,3.51-5.35,6.21
					c-1.25,2.7-1.87,5.81-1.87,9.35v2.25c0,5.54,1.35,9.9,4.06,13.08c2.7,3.18,6.42,4.76,11.13,4.76c4.31,0,7.79-1.14,10.43-3.41
					c2.64-2.27,4.1-5.39,4.39-9.35h-7.58c-0.17,2.24-0.83,3.9-1.98,4.97c-1.15,1.07-2.9,1.61-5.24,1.61c-2.6,0-4.49-0.92-5.7-2.76
					c-1.2-1.84-1.81-4.74-1.81-8.72v-2.78C47.72,440.07,48.36,437.26,49.62,435.45z"/>
				<rect x="74.88" y="425" class="st0" width="7.33" height="38.82"/>
				<path class="st0" d="M109.04,439.36c-2.33-2.26-5.35-3.39-9.04-3.39c-2.6,0-4.87,0.58-6.84,1.74c-1.96,1.16-3.47,2.81-4.51,4.93
					c-1.04,2.12-1.57,4.54-1.57,7.25v0.33c0,4.35,1.18,7.79,3.53,10.31c2.35,2.53,5.5,3.79,9.44,3.79c3.94,0,7.09-1.27,9.43-3.8
					c2.34-2.54,3.51-5.9,3.51-10.1l-0.05-1.87C112.68,444.68,111.38,441.62,109.04,439.36z M104.2,456.4
					c-0.99,1.36-2.38,2.04-4.15,2.04c-1.82,0-3.22-0.69-4.2-2.06c-0.98-1.38-1.47-3.36-1.47-5.96c0-2.92,0.49-5.08,1.47-6.47
					c0.98-1.39,2.36-2.09,4.15-2.09c1.8,0,3.2,0.7,4.2,2.1c0.99,1.4,1.49,3.38,1.49,5.92C105.69,452.87,105.2,455.04,104.2,456.4z"
					/>
				<path class="st0" d="M133.62,455.86c-0.93,1.72-2.63,2.58-5.11,2.58c-2.6,0-3.89-1.43-3.89-4.28v-17.69h-7.3v17.92
					c0.03,3.25,0.82,5.72,2.36,7.41c1.54,1.68,3.78,2.53,6.71,2.53c3.18,0,5.68-1.1,7.48-3.29l0.2,2.78h6.88v-27.35h-7.33V455.86z"
					/>
				<path class="st0" d="M162.64,438.95c-1.74-1.99-3.93-2.98-6.6-2.98c-3.32,0-5.94,1.26-7.85,3.79
					c-1.91,2.53-2.87,5.92-2.87,10.19c0,4.52,0.97,8.04,2.92,10.58c1.95,2.54,4.53,3.8,7.75,3.8c2.86,0,5.21-1.14,7.03-3.41
					l0.35,2.91h6.6V425h-7.33V438.95z M162.64,455.58c-0.93,1.91-2.52,2.86-4.78,2.86c-1.7,0-3-0.68-3.89-2.05
					c-0.89-1.37-1.34-3.33-1.34-5.9c0-5.72,1.76-8.58,5.28-8.58c2.24,0,3.82,0.95,4.73,2.86V455.58z"/>
				<polygon class="st0" points="198.48,451.29 183.7,427.02 176.11,427.02 176.11,463.82 183.7,463.82 183.7,439.61 198.46,463.82 
					206.04,463.82 206.04,427.02 198.48,427.02 				"/>
				<path class="st0" d="M234.32,445.65c0-3.07-0.99-5.45-2.97-7.14c-1.98-1.69-4.66-2.54-8.03-2.54c-2.22,0-4.23,0.38-6.02,1.15
					c-1.79,0.77-3.17,1.82-4.15,3.17c-0.98,1.35-1.47,2.81-1.47,4.4h7.3c0-1.03,0.35-1.84,1.05-2.43c0.7-0.59,1.66-0.88,2.89-0.88
					c1.4,0,2.42,0.38,3.07,1.14c0.65,0.76,0.97,1.77,0.97,3.03v1.57h-3.36c-4.06,0.02-7.17,0.8-9.31,2.35
					c-2.15,1.55-3.22,3.77-3.22,6.67c0,2.36,0.88,4.31,2.64,5.86c1.76,1.55,3.98,2.33,6.66,2.33c2.83,0,5.13-0.99,6.9-2.96
					c0.15,0.98,0.4,1.79,0.73,2.45h7.38v-0.43c-0.71-1.33-1.07-3.29-1.09-5.89V445.65z M226.99,456.09
					c-0.44,0.81-1.11,1.45-2.02,1.93c-0.91,0.48-1.9,0.72-2.98,0.72c-1.11,0-1.99-0.29-2.63-0.88c-0.64-0.59-0.96-1.34-0.96-2.25
					l0.03-0.43c0.24-2.53,2.18-3.79,5.84-3.79h2.73V456.09z"/>
				<path class="st0" d="M249.26,457.66c-0.39-0.44-0.58-1.16-0.58-2.17v-13.65h4.68v-5.36h-4.68v-6.72h-7.3v6.72h-3.99v5.36h3.99
					v14.74c0.1,5.17,2.71,7.76,7.84,7.76c1.52,0,2.99-0.22,4.42-0.66v-5.54c-0.62,0.12-1.34,0.18-2.15,0.18
					C250.39,458.31,249.65,458.1,249.26,457.66z"/>
				<path class="st0" d="M261.47,425.63c-1.26,0-2.26,0.35-3,1.06c-0.73,0.71-1.1,1.61-1.1,2.7c0,1.11,0.37,2.02,1.12,2.73
					c0.75,0.71,1.74,1.06,2.97,1.06c1.23,0,2.22-0.35,2.97-1.06c0.75-0.71,1.12-1.62,1.12-2.73c0-1.1-0.37-2-1.11-2.7
					C263.71,425.99,262.72,425.63,261.47,425.63z"/>
				<rect x="257.81" y="436.48" class="st0" width="7.33" height="27.35"/>
				<polygon class="st0" points="281.39,454.9 276.31,436.48 268.67,436.48 277.9,463.82 284.88,463.82 294.1,436.48 286.47,436.48 
									"/>
				<path class="st0" d="M320.82,449.54c0-4.33-1.07-7.68-3.22-10.03c-2.15-2.36-5.09-3.54-8.83-3.54c-2.49,0-4.71,0.59-6.66,1.76
					c-1.95,1.17-3.45,2.84-4.51,5c-1.06,2.17-1.59,4.62-1.59,7.37v0.71c0,4.09,1.26,7.37,3.78,9.83c2.52,2.46,5.78,3.69,9.79,3.69
					c2.27,0,4.35-0.43,6.22-1.3c1.87-0.87,3.35-2.08,4.45-3.63l-3.59-4.02c-1.58,2.04-3.78,3.06-6.6,3.06
					c-1.82,0-3.32-0.54-4.51-1.62c-1.19-1.08-1.9-2.51-2.14-4.3h17.42V449.54z M313.64,447.55h-10.16c0.25-1.83,0.83-3.23,1.73-4.21
					c0.9-0.98,2.08-1.47,3.53-1.47c1.55,0,2.75,0.44,3.59,1.33c0.84,0.89,1.28,2.15,1.31,3.77V447.55z"/>
			</g>
		</g>
		<g>
			<path class="st0" d="M334.21,450.86v12.97h-7.58v-36.8h14.36c2.76,0,5.19,0.51,7.29,1.52c2.1,1.01,3.71,2.45,4.84,4.31
				c1.13,1.86,1.69,3.98,1.69,6.36c0,3.61-1.23,6.45-3.7,8.53c-2.47,2.08-5.89,3.12-10.25,3.12H334.21z M334.21,444.72h6.77
				c2.01,0,3.53-0.47,4.59-1.42c1.05-0.94,1.58-2.29,1.58-4.04c0-1.8-0.53-3.26-1.59-4.37s-2.53-1.68-4.4-1.72h-6.95V444.72z"/>
			<path class="st0" d="M389.14,459.17c-1.36,1.63-3.29,2.9-5.79,3.8c-2.49,0.9-5.26,1.35-8.29,1.35c-3.18,0-5.98-0.7-8.38-2.09
				c-2.4-1.39-4.25-3.41-5.56-6.05c-1.31-2.65-1.98-5.75-2.01-9.33v-2.5c0-3.67,0.62-6.85,1.86-9.54c1.24-2.69,3.02-4.74,5.36-6.17
				c2.33-1.42,5.07-2.14,8.2-2.14c4.36,0,7.78,1.04,10.24,3.12c2.46,2.08,3.92,5.11,4.37,9.09h-7.38c-0.34-2.11-1.08-3.65-2.24-4.63
				c-1.15-0.98-2.74-1.47-4.76-1.47c-2.58,0-4.54,0.97-5.89,2.91c-1.35,1.94-2.03,4.82-2.05,8.64v2.35c0,3.86,0.73,6.77,2.2,8.75
				c1.47,1.97,3.61,2.96,6.45,2.96c2.85,0,4.88-0.61,6.09-1.82v-6.34h-6.9v-5.59h14.48V459.17z"/>
		</g>
	</g>
</g>
</svg>
 mediatype: image/svg+xml apiservicedefinitions: {} customresourcedefinitions: owned: + # Backup Section - kind: Backup name: backups.postgresql.cnpg.io displayName: Backups @@ -141,10 +161,11 @@ spec: - 'urn:alm:descriptor:io.kubernetes:Clusters' statusDescriptors: - displayName: Phase - description: Current backupphase + description: Current backup phase path: phase x-descriptors: - 'urn:alm:descriptor:io.kubernetes.phase' + # Cluster Section - kind: Cluster name: clusters.postgresql.cnpg.io version: v1 @@ -172,6 +193,18 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:text' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Image section + - path: imagePullSecrets + displayName: Image Pull Secret + description: List of secrets to use for pulling the images + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: imagePullSecrets[0].name + displayName: Image Pull Secret + description: Secret for pulling the image. If empty, no secret will be used + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' - path: imageName displayName: Image Name description: Name of the PostgreSQL container image @@ -196,6 +229,13 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:hidden' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: imageCatalogRef + displayName: Image Catalog + description: The name of the image catalog to use + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Instances section - path: instances displayName: Instances description: Number of instances required in the cluster @@ -213,6 +253,34 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:number' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: enablePDB + displayName: Enable Pod Disruption Budget + description: Boolean to enable or disable the Pod Disruption Budget + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: priorityClassName + displayName: Priority Class Name + description: The name of the Priority Class to use in every generated Pod + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:PriorityClass' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: env + displayName: Environment Variables + description: Environment variables to set in the pods created in the cluster + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: envFrom + displayName: Environment Variables from ConfigMap + description: ConfigMap to use as environment variables in the pods created in the cluster + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: seccompProfile + displayName: Seccomp Profile applied to every pod in the cluster + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # PostgreSQL Configuration section - path: postgresql displayName: PostgreSQL Configuration description: Options for postgresql.conf @@ -224,11 +292,8 @@ spec: - path: postgresql.pg_hba[0] displayName: pg_hba rules description: PostgreSQL Host Based Authentication rules - - path: postgresql.epas.audit - displayName: EPAS Configuration - description: Boolean to enable edb_audit logging - path: postgresql.promotionTimeout - displayName: pgctl Timeout + displayName: pg_ctl Timeout description: maximum number of seconds to wait when promoting an instance to primary - path: 
postgresql.shared_preload_libraries[0] displayName: Preload Libraries @@ -257,6 +322,34 @@ spec: description: Boolean to enable TLS x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - path: postgresql.synchronous + displayName: Synchronous Replication Configuration + description: Configuration of the synchronous replication feature + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: postgresql.synchronous.method + displayName: Synchronous Replication Configuration Method + description: The method to use for synchronous replication feature + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:select:any' + - 'urn:alm:descriptor:com.tectonic.ui:select:first' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: tablespaces + displayName: Tablespaces + description: Configuration of the tablespaces + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: managed + displayName: Managed service + description: Resources managed by the operator + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: replicationSlots + displayName: Replication Slots Configuration + description: Configuration of the replication slots + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Bootstrap section - path: bootstrap displayName: Bootstrap Configuration description: Instructions to bootstrap the cluster @@ -267,12 +360,14 @@ spec: description: The name of the Bootstrap secret x-descriptors: - 'urn:alm:descriptor:io.kubernetes:Secret' + # Replica cluster section - path: replica displayName: Replica description: Replica cluster configuration x-descriptors: - 'urn:alm:descriptor:io.kubernetes:Secret' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Superuser section - path: superuserSecret displayName: Superuser Secret description: | @@ -282,7 +377,7 @@ spec: - 'urn:alm:descriptor:com.tectonic.ui:advanced' - path: superuserSecret.name displayName: Superuser Secret - description: Name of hte Superuser Secret + description: Name of the Superuser Secret x-descriptors: - 'urn:alm:descriptor:io.kubernetes:Secret' - path: enableSuperuserAccess @@ -291,16 +386,13 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Certificates section - path: certificates displayName: Certificates description: The configuration for the CA and related certificates x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:advanced' - - path: imagePullSecrets[0].name - displayName: Image Pull Secret - description: Secret for pulling the image. 
If empty, no secret will be used
-        x-descriptors:
-          - 'urn:alm:descriptor:io.kubernetes:Secret'
+      # Storage section
       - path: storage
         displayName: Storage
         description: Configuration of the storage of the instances
@@ -334,6 +426,7 @@ spec:
           path: storage.pvcTemplate
           x-descriptors:
             - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      # Delay and timeout section
       - path: startDelay
         displayName: Maximum Start Delay
         description: The time in seconds that is allowed for a PostgreSQL instance
@@ -348,6 +441,39 @@ spec:
         x-descriptors:
           - 'urn:alm:descriptor:com.tectonic.ui:number'
           - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: smartShutdownTimeout
+        displayName: Smart Shutdown Timeout
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:number'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: switchoverDelay
+        displayName: Switchover Delay
+        description: The time in seconds that is allowed for a PostgreSQL instance
+          to gracefully shutdown during a switchover
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:number'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: failoverDelay
+        displayName: Failover Delay
+        description: The amount of time (in seconds) to wait before triggering a failover
+          after the primary PostgreSQL instance in the cluster was detected to be unhealthy
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:number'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: livenessProbeTimeout
+        displayName: Liveness Probe Timeout
+        description: The time in seconds that is allowed for the liveness probe to
+          complete
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:number'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      # Probes configuration section
+      - path: probes
+        displayName: Probes Configuration
+        description: Configuration of the probes to be injected in the PostgreSQL instances
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      # Affinity section
       - path: affinity
         displayName: Pod Affinity
         description: Affinity/Anti-affinity rules for Pods
@@ -363,11 +489,13 @@ spec:
         description: Key value pair of which nodes the pods can run
         x-descriptors:
           - 'urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Node'
+      # Resources section
       - path: resources
-        display: Resources
-        description:
+        displayName: PostgreSQL Resource Requirements
+        description: Resource requirements for the PostgreSQL instances
         x-descriptors:
           - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements'
+      # Update strategy section
       - path: primaryUpdateStrategy
         displayName: Primary Update Strategy
         x-descriptors:
@@ -376,6 +504,10 @@ spec:
         displayName: Primary Update Method
         x-descriptors:
           - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      # Backup section
+      - path: backup
+        displayName: Backup Configuration
+        description: Configuration to be used for backups
       - path: backup.barmanObjectStore.endpointURL
         displayName: Object Storage Endpoint
         description: S3-compatible object storage Endpoint.
If empty the S3 default is used @@ -408,7 +540,7 @@ spec: - 'urn:alm:descriptor:io.kubernetes:text' - path: backup.barmanObjectStore.wal.encryption displayName: WAL encryption - description: WAL encryprion algorithm + description: WAL encryption algorithm x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - path: backup.barmanObjectStore.data.compression @@ -418,7 +550,7 @@ spec: - 'urn:alm:descriptor:io.kubernetes:text' - path: backup.barmanObjectStore.data.encryption displayName: Data encryption - description: Data encryprion algorithm + description: Data encryption algorithm x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - path: backup.barmanObjectStore.data.immediateCheckpoint @@ -429,6 +561,10 @@ spec: displayName: Jobs x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:number' + # Maintenance Window section + - path: nodeMaintenanceWindow + displayName: Node Maintenance Window + description: The configuration of the maintenance window for Kubernetes nodes - path: nodeMaintenanceWindow.inProgress displayName: In Progress description: Maintenance window for Kubernetes node upgrades is in progress @@ -440,6 +576,7 @@ spec: description: Should the existing PVCs be reused during Kubernetes upgrades? x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + # Monitoring section - path: monitoring displayName: Monitoring description: The configuration of the monitoring infrastructure of this cluster @@ -465,17 +602,25 @@ spec: displayName: Enable PodMonitor resource x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + # External Clusters section - path: externalClusters displayName: External Clusters description: List of external clusters which are used in the configuration x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Log Level section - path: logLevel displayName: Log Level description: One of error, info (default), debug or trace x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Plugins section + - path: plugins + displayName: Plugins + description: List of plugins to be installed + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' statusDescriptors: - displayName: Working Pods description: Status Pods @@ -549,7 +694,7 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:text' - path: type - description: Service type of the cluster to connect to ('rw' or 'rw') + description: Service type of the cluster to connect to ('rw' or 'ro') x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - path: instances @@ -568,7 +713,7 @@ spec: - 'urn:alm:descriptor:com.tectonic.ui:advanced' - path: pgbouncer.poolMode displayName: PgBouncer PoolMode - description: The poolmode to use. One of 'session' or 'transaction'. + description: The pool mode to use. One of 'session' or 'transaction'. 
x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - path: pgbouncer.authQuerySecret @@ -676,23 +821,199 @@ spec: - 'urn:alm:descriptor:com.tectonic.ui:text' - kind: Database name: databases.postgresql.cnpg.io - displayName: Database management - description: Declarative database management + displayName: Postgres Database + description: Declarative creation and management of a database on a Cluster version: v1 resources: - kind: Cluster name: '' version: v1 specDescriptors: - - path: databaseReclaimPolicy - displayName: Database reclaim policy - description: Database reclame policy - path: cluster displayName: Cluster requested to create the database - description: Cluster requested to create the database + description: Cluster in which to create the database - path: name displayName: Database name description: Database name - path: owner displayName: Database Owner - description: Database Owner + description: Owner of the database that will be created in Postgres + - path: ensure + displayName: Ensure + description: Ensure the PostgreSQL database is `present` or `absent` + - path: databaseReclaimPolicy + displayName: Database reclaim policy + description: Specifies the action to take for the database inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the database or retain it for future management. + # Configuration section + - path: template + displayName: Template + description: The name of the template from which to create this database. + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: tablespace + displayName: Tablespace + description: The name of the tablespace that will be associated with the database. + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: isTemplate + displayName: Database is a template + description: If true, this database is considered a template + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: allowConnections + displayName: Allow Connections + description: If false, then no one can connect to this database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: connectionLimit + displayName: Connection Limit + description: How many concurrent connections can be made to this database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:number' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Encoding and Locale + - path: encoding + displayName: Encoding + description: Character set encoding to use in the database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: locale + displayName: Locale + description: Sets the default collation order and character classification for the database. 
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:text'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: localeCollate
+        displayName: LC collate
+        description: The collation to use for the database
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:text'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: localeCType
+        displayName: LC ctype
+        description: The ctype to use for the database
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:text'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: localeProvider
+        displayName: Locale Provider
+        description: Specifies the provider to use for the default collation in this database (Available from PostgreSQL 16).
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:text'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: collationVersion
+        displayName: Collation version
+        description: The version identifier of the collation
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:text'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: builtinLocale
+        displayName: Builtin locale
+        description: The choice of which builtin locale to use
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:text'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      # ICU section
+      - path: icuLocale
+        displayName: ICU locale
+        description: ICU locale to use for the database
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:text'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+      - path: icuRules
+        displayName: ICU rules
+        description: Additional customization of ICU locale
+        x-descriptors:
+          - 'urn:alm:descriptor:com.tectonic.ui:text'
+          - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+    statusDescriptors:
+      - path: applied
+        displayName: Applied
+        description: Applied is true if the database was reconciled correctly
+      - path: message
+        displayName: Message
+        description: Message is the reconciliation output message
+  - kind: Publication
+    name: publications.postgresql.cnpg.io
+    displayName: Postgres Publication
+    description: Declarative creation and management of a Logical Replication Publication in a PostgreSQL Cluster
+    version: v1
+    resources:
+      - kind: Cluster
+        name: ''
+        version: v1
+    specDescriptors:
+      - path: name
+        displayName: Publication name
+        description: Name of the publication for PostgreSQL logical replication
+      - path: dbname
+        displayName: Database name
+        description: Database on which the publication will be created
+      - path: cluster
+        displayName: Cluster requested to create the publication
+        description: Cluster on which the publication will be created
+      - path: target
+        displayName: Publication target
+        description: Specifies which tables/schemas in the database should be published
+      - path: publicationReclaimPolicy
+        displayName: Publication reclaim policy
+        description: Specifies the action to take for the publication inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the publication or retain it for future management.
+    statusDescriptors:
+      - path: applied
+        displayName: Applied
+        description: Applied is true if the publication was reconciled correctly
+      - path: message
+        displayName: Message
+        description: Message is the reconciliation output message
+  - kind: Subscription
+    name: subscriptions.postgresql.cnpg.io
+    displayName: Postgres Subscription
+    description: Declarative creation and management of a Logical Replication Subscription in a PostgreSQL Cluster to a previously defined Publication
+    version: v1
+    resources:
+      - kind: Cluster
+        name: ''
+        version: v1
+    specDescriptors:
+      - path: name
+        displayName: Subscription name
+        description: Name of the subscription for PostgreSQL logical replication
+      - path: dbname
+        displayName: Database name
+        description: Database on which the Subscription will be created
+      - path: publicationName
+        displayName: Publication name
+        description: Name of the Publication to subscribe to
+      - path: cluster
+        displayName: Cluster requested to create the subscription
+        description: Cluster on which the subscription will be created (subscriber)
+      - path: externalClusterName
+        displayName: Name of the external cluster with publication
+        description: Name of the cluster where the Publication is defined (publisher)
+      - path: publicationDBName
+        displayName: Name of the database containing the publication on the external cluster
+        description: The name of the database containing the publication on the external cluster. Defaults to the one in the external cluster definition.
+      - path: subscriptionReclaimPolicy
+        displayName: Subscription reclaim policy
+        description: Specifies the action to take for the subscription inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the subscription or retain it for future management.
+    statusDescriptors:
+      - path: applied
+        displayName: Applied
+        description: Applied is true if the subscription was reconciled correctly
+      - path: message
+        displayName: Message
+        description: Message is the reconciliation output message
+  - kind: FailoverQuorum
+    name: failoverquorums.postgresql.cnpg.io
+    displayName: Failover Quorum
+    description: FailoverQuorum contains the information about the current failover quorum status of a PG cluster
+    version: v1
+    resources:
+      - kind: Cluster
+        name: ''
+        version: v1
diff --git a/config/olm-samples/kustomization.yaml b/config/olm-samples/kustomization.yaml
index 205a50a544..6bb494f569 100644
--- a/config/olm-samples/kustomization.yaml
+++ b/config/olm-samples/kustomization.yaml
@@ -6,3 +6,5 @@ resources:
 - postgresql_v1_imagecatalog.yaml
 - postgresql_v1_clusterimagecatalog.yaml
 - postgresql_v1_database.yaml
+- postgresql_v1_publication.yaml
+- postgresql_v1_subscription.yaml
diff --git a/config/olm-samples/postgresql_v1_backup.yaml b/config/olm-samples/postgresql_v1_backup.yaml
index 330ede8589..40147cec70 100644
--- a/config/olm-samples/postgresql_v1_backup.yaml
+++ b/config/olm-samples/postgresql_v1_backup.yaml
@@ -5,3 +5,5 @@ metadata:
 spec:
   cluster:
     name: cluster-sample
+status:
+  serverName:
diff --git a/config/olm-samples/postgresql_v1_cluster.yaml b/config/olm-samples/postgresql_v1_cluster.yaml
index 0a8204977b..40f324f07b 100644
--- a/config/olm-samples/postgresql_v1_cluster.yaml
+++ b/config/olm-samples/postgresql_v1_cluster.yaml
@@ -19,3 +19,5 @@ spec:
     walStorage:
       size: 1Gi
   logLevel: info
+status:
+  instances: 3
diff --git a/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml b/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml
index 20e725876d..3ad7041ea2 100644
--- a/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml
+++ b/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml
@@ -1,3 +1,4 @@
+apiVersion: postgresql.cnpg.io/v1
 kind: ClusterImageCatalog
 metadata:
   name: postgresql
diff --git a/config/olm-samples/postgresql_v1_database.yaml b/config/olm-samples/postgresql_v1_database.yaml
index b4d3d56b4d..748cb1ee7a 100644
--- a/config/olm-samples/postgresql_v1_database.yaml
+++ b/config/olm-samples/postgresql_v1_database.yaml
@@ -7,3 +7,5 @@ spec:
   owner: app
   cluster:
     name: cluster-sample
+status:
+  applied: false
diff --git a/config/olm-samples/postgresql_v1_imagecatalog.yaml b/config/olm-samples/postgresql_v1_imagecatalog.yaml
index faf6d60a42..f141f90691 100644
--- a/config/olm-samples/postgresql_v1_imagecatalog.yaml
+++ b/config/olm-samples/postgresql_v1_imagecatalog.yaml
@@ -1,3 +1,4 @@
+apiVersion: postgresql.cnpg.io/v1
 kind: ImageCatalog
 metadata:
   name: postgresql
diff --git a/config/olm-samples/postgresql_v1_pooler.yaml b/config/olm-samples/postgresql_v1_pooler.yaml
index 0400ed54c2..1ba730bb17 100644
--- a/config/olm-samples/postgresql_v1_pooler.yaml
+++ b/config/olm-samples/postgresql_v1_pooler.yaml
@@ -9,3 +9,5 @@ spec:
     type: rw
   pgbouncer:
     poolMode: session
+status:
+  instances: 1
diff --git a/config/olm-samples/postgresql_v1_publication.yaml b/config/olm-samples/postgresql_v1_publication.yaml
new file mode 100644
index 0000000000..89a54cbac6
--- /dev/null
+++ b/config/olm-samples/postgresql_v1_publication.yaml
@@ -0,0 +1,13 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+  name: publication-sample
+spec:
+  name: pub
+  dbname: app
+  cluster:
+    name: cluster-sample
+  target:
+    allTables: true
+status:
+  applied: false
diff --git
a/config/olm-samples/postgresql_v1_scheduledbackup.yaml b/config/olm-samples/postgresql_v1_scheduledbackup.yaml index bd2350fddc..6e61b15d9f 100644 --- a/config/olm-samples/postgresql_v1_scheduledbackup.yaml +++ b/config/olm-samples/postgresql_v1_scheduledbackup.yaml @@ -6,3 +6,5 @@ spec: schedule: "0 0 0 * * *" cluster: name: cluster-sample +status: + lastCheckTime: diff --git a/config/olm-samples/postgresql_v1_subscription.yaml b/config/olm-samples/postgresql_v1_subscription.yaml new file mode 100644 index 0000000000..6047977c3e --- /dev/null +++ b/config/olm-samples/postgresql_v1_subscription.yaml @@ -0,0 +1,13 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: subscription-sample +spec: + name: sub + dbname: app + publicationName: pub + cluster: + name: cluster-sample-dest + externalClusterName: cluster-sample +status: + applied: false diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml index fd6200ae97..8237b70d80 100644 --- a/config/olm-scorecard/patches/basic.config.yaml +++ b/config/olm-scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: basic test: basic-check-spec-test diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml index a547ce213d..416660a77e 100644 --- a/config/olm-scorecard/patches/olm.config.yaml +++ b/config/olm-scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-status-descriptors-test diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml deleted file mode 100644 index bd4af137a9..0000000000 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metrics-reader -rules: -- nonResourceURLs: ["/metrics"] - verbs: ["get"] diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 618f5e4177..0000000000 --- a/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: 
["authentication.k8s.io"] - resources: - - tokenreviews - verbs: ["create"] -- apiGroups: ["authorization.k8s.io"] - resources: - - subjectaccessreviews - verbs: ["create"] diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml deleted file mode 100644 index 46f50c4d66..0000000000 --- a/config/rbac/auth_proxy_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: cnpg-manager - namespace: cnpg-system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml deleted file mode 100644 index 1a0b3a02e0..0000000000 --- a/config/rbac/auth_proxy_service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/name: cloudnative-pg - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - targetPort: https - selector: - app.kubernetes.io/name: cloudnative-pg diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 99493b37c4..3d9a82e989 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -3,17 +3,14 @@ kind: Kustomization resources: - role.yaml - role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -#- auth_proxy_service.yaml -#- auth_proxy_role.yaml -#- auth_proxy_role_binding.yaml -#- auth_proxy_client_clusterrole.yaml + # For each CRD, "Editor" and "Viewer" roles are scaffolded by # default, aiding admins in cluster management. Those roles are # not used by the Project itself. You can comment the following lines # if you do not want those helpers be installed with your Project. +- subscription_editor_role.yaml +- subscription_viewer_role.yaml +- publication_editor_role.yaml +- publication_viewer_role.yaml - database_editor_role.yaml - database_viewer_role.yaml - diff --git a/config/rbac/publication_editor_role.yaml b/config/rbac/publication_editor_role.yaml new file mode 100644 index 0000000000..f741900fa3 --- /dev/null +++ b/config/rbac/publication_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit publications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + app.kubernetes.io/managed-by: kustomize + name: publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get diff --git a/config/rbac/publication_viewer_role.yaml b/config/rbac/publication_viewer_role.yaml new file mode 100644 index 0000000000..32e84f531f --- /dev/null +++ b/config/rbac/publication_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view publications. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + app.kubernetes.io/managed-by: kustomize + name: publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index ce1e7ded88..7f6a70c742 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -141,7 +141,9 @@ rules: - clusters - databases - poolers + - publications - scheduledbackups + - subscriptions verbs: - create - delete @@ -155,7 +157,9 @@ rules: resources: - backups/status - databases/status + - publications/status - scheduledbackups/status + - subscriptions/status verbs: - get - patch @@ -180,12 +184,23 @@ rules: - postgresql.cnpg.io resources: - clusters/status + - failoverquorums/status - poolers/status verbs: - get - patch - update - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - failoverquorums + verbs: + - create + - delete + - get + - list + - watch - apiGroups: - rbac.authorization.k8s.io resources: diff --git a/config/rbac/subscription_editor_role.yaml b/config/rbac/subscription_editor_role.yaml new file mode 100644 index 0000000000..066b1c494d --- /dev/null +++ b/config/rbac/subscription_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit subscriptions. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + app.kubernetes.io/managed-by: kustomize + name: subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get diff --git a/config/rbac/subscription_viewer_role.yaml b/config/rbac/subscription_viewer_role.yaml new file mode 100644 index 0000000000..4cf8ff0d06 --- /dev/null +++ b/config/rbac/subscription_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view subscriptions. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + app.kubernetes.io/managed-by: kustomize + name: subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 33aafeddb4..595d06293b 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -44,6 +44,26 @@ webhooks: resources: - clusters sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None - admissionReviewVersions: - v1 clientConfig: @@ -110,6 +130,26 @@ webhooks: resources: - clusters sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None - admissionReviewVersions: - v1 clientConfig: diff --git a/contribute/README.md b/contribute/README.md index 8e88260aab..d5c9d2a266 100644 --- a/contribute/README.md +++ b/contribute/README.md @@ -9,8 +9,9 @@ a good set of docs that guide you through the development process. Having said t we know that everything can always be improved, so if you think our documentation is not enough, let us know or provide a pull request based on your experience. -Feel free to ask in the ["dev" chat](https://cloudnativepg.slack.com/archives/C03D68KGG65) -if you have questions or are seeking guidance. +If you have any questions or need guidance, feel free to reach out in the +[#cloudnativepg-dev](https://cloud-native.slack.com/archives/C08MW1HKF40) channel +on the [CNCF Slack workspace](https://communityinviter.com/apps/cloud-native/cncf). ## About our development workflow @@ -93,15 +94,19 @@ If you have written code for an improvement to CloudNativePG or a bug fix, please follow this procedure to submit a pull request: 1. [Create a fork](development_environment/README.md#forking-the-repository) of CloudNativePG -2. Self-assign the ticket and begin working on it in the forked project. Move - the ticket to `Analysis` or `In Development` phase of +2. **External contributors**: Comment on the issue with "I'd like to work on this" and wait for assignment. + **Maintainers**: Self-assign the ticket and move it to `Analysis` or `In Development` phase of [CloudNativePG operator development](https://github.com/orgs/cloudnative-pg/projects/2) -3. [Run the e2e tests in the forked repository](e2e_testing_environment/README.md#running-e2e-tests-on-a-fork-of-the-repository) +3. **External contributors**: Run local unit tests and basic e2e tests using `FEATURE_TYPE=smoke,basic make e2e-test-kind` or `TEST_DEPTH=0 make e2e-test-kind` for critical tests only. + **Maintainers**: [Run the comprehensive e2e tests in the forked repository](e2e_testing_environment/README.md#running-e2e-tests-on-a-fork-of-the-repository) 4. 
Once development is finished, create a pull request from your forked project - to the CloudNativePG project and move the ticket to the `Waiting for First Review` - phase. Please make sure the pull request title and message follow - [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) - + to the CloudNativePG project. **Maintainers** will move the ticket to the `Waiting for First Review` + phase. + + > Please make sure the pull request title and message follow + > [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) + + > To facilitate collaboration, always [allow edits by maintainers](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/allowing-changes-to-a-pull-request-branch-created-from-a-fork) One of the maintainers will then proceed with the first review and approve the CI workflow to run in the CloudNativePG project. The second reviewer will run diff --git a/contribute/development_environment/README.md b/contribute/development_environment/README.md index 309081835a..48efe55395 100644 --- a/contribute/development_environment/README.md +++ b/contribute/development_environment/README.md @@ -47,6 +47,7 @@ environment variable: - [golangci-lint](https://github.com/golangci/golangci-lint) - [goreleaser](https://goreleaser.com/) - [Operator SDK CLI](https://sdk.operatorframework.io/) +- [Helm](https://helm.sh/) In addition, check that the following packages are installed in your system: @@ -83,7 +84,8 @@ components in your Mac OS X system: brew install go \ kind \ golangci/tap/golangci-lint \ - goreleaser + goreleaser \ + helm ``` Please note that bash v5.0+ is required, this can be installed with: @@ -180,13 +182,15 @@ build and deploy: ```shell cd cloudnative-pg git checkout main -make deploy-locally +./hack/setup-cluster.sh create load deploy ``` This will build the operator based on the `main` branch content, create a `kind` cluster in your workstation with a container registry that provides the operator image that you just built. +*Note:* For a list of options, run `./hack/setup-cluster.sh`. + > **NOTE:** In case of errors, make sure that you have the latest versions of the Go > binaries in your system. For this reason, from time to time, we recommend > you running: `make distclean`. @@ -202,7 +206,7 @@ kubectl get deploy -n cnpg-system cnpg-controller-manager Now that your system has been validated, you can tear down the local cluster with: ```shell -make kind-cluster-destroy +./hack/setup-cluster.sh destroy ``` Congratulations, you have a suitable development environment. You are now able diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md index bbe60be4a0..cd70269c4e 100644 --- a/contribute/e2e_testing_environment/README.md +++ b/contribute/e2e_testing_environment/README.md @@ -12,8 +12,8 @@ PostgreSQL** on **all supported versions of Kubernetes**. 
This framework is made up by two important components: -- a local and disposable Kubernetes cluster built with `kind` (default) or - `k3d` on which to run the E2E tests +- a local and disposable Kubernetes cluster built with `kind` on which to run + the E2E tests - a set of E2E tests to be run on an existing Kubernetes cluster (including the one above) @@ -56,22 +56,15 @@ All flags have corresponding environment variables labeled `(Env:...` in the tab | Flags | Usage | |-------|-------------------------------------------------------------------------------------------------------------------------------| -| -r |--registry | Enable local registry. (Env: `ENABLE_REGISTRY`) | -| -e |--engine | Use the provided ENGINE to run the cluster. Available options are 'kind' and 'k3d'. Default 'kind'. (Env: `CLUSTER_ENGINE`) | | -k |--k8s-version | Use the specified Kubernetes full version number (e.g., `-k v1.30.0`). (Env: `K8S_VERSION`) | | -n |--nodes | Create a cluster with the required number of nodes. Used only during "create" command. Default: 3 (Env: `NODES`) | - -> **NOTE:** if you want to use custom engine and registry settings, please make -> sure that they are consistent through all invocations either via command line -> options or by defining the respective environment variables - -> **NOTE:** on ARM64 architecture like Apple M1/M2/M3, `kind` and `k3d` provide different -> images for AMD64 and ARM64 nodes. If the **x86/amd64 emulation** is not enabled, +> **NOTE:** on ARM64 architecture like Apple M1/M2/M3, `kind` provides different +> images for AMD64 and ARM64 nodes. If the **x86/amd64 emulation** is not enabled, > the `./hack/setup-cluster.sh` script will correctly detect the architecture > and pass the `DOCKER_DEFAULT_PLATFORM=linux/arm64` environment variable to Docker > to use the ARM64 node image. -> If you want to explicitly use the **x86/amd64 emulation**, you need to set +> If you want to explicitly use the **x86/amd64 emulation**, you need to set > the `DOCKER_DEFAULT_PLATFORM=linux/amd64` environment variable before > calling the `./hack/setup-cluster.sh` script. @@ -91,7 +84,7 @@ will create a deployment, and add two services on ports 6060 and 4040 respectively, in the same namespace as the operator: ``` console -kubectl get svc -n cnpg-system +kubectl get svc -n cnpg-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE cnpg-pprof ClusterIP 10.96.17.58 6060/TCP 9m41s @@ -158,23 +151,7 @@ specifying the following variable * `DOCKER_REGISTRY_MIRROR`: DockerHub mirror URL (i.e. https://mirror.gcr.io) -To run E2E testing you can also use: - -| kind | k3d | -|------------------------------------------------|-------------------------------------------------| -| `TEST_UPGRADE_TO_V1=false make e2e-test-kind` | `TEST_UPGRADE_TO_V1=false make e2e-test-k3d` | - - -### Wrapper scripts for E2E testing - -There are currently two available scripts that wrap cluster setup and -execution of tests. One is for `kind` and one is for `k3d`. They simply embed -`hack/setup-cluster.sh` and `hack/e2e/run-e2e.sh` to create a local Kubernetes -cluster and then run E2E tests on it. - -There is also a script to run E2E tests on an existing `local` Kubernetes cluster. -It tries to detect the appropriate defaults for storage class and volume snapshot class environment variables -by looking at the annotation of the default storage class and the volume snapshot class. +To run E2E testing you can also use `TEST_UPGRADE_TO_V1=false make e2e-test-kind`. 
### Using feature type test selection/filter @@ -206,6 +183,9 @@ exported, it will select all medium test cases from the feature type provided. | `security` | | `maintenance` | | `tablespaces` | +| `publication-subscription` | +| `declarative-databases` | +| `postgres-major-upgrade` | ex: ```shell @@ -214,6 +194,19 @@ export FEATURE_TYPE=smoke,basic,service-connectivity This will run smoke, basic and service connectivity e2e. One or many can be passed as value with comma separation without spaces. +### Wrapper scripts for E2E testing + +There is a script available that wraps cluster setup and execution of +tests for `kind`. It embeds `hack/setup-cluster.sh` and +`hack/e2e/run-e2e.sh` to create a local Kubernetes cluster and then +run E2E tests on it. + +There is also a script to run E2E tests on an existing `local` +Kubernetes cluster. It tries to detect the appropriate defaults for +storage class and volume snapshot class environment variables by +looking at the annotation of the default storage class and the volume +snapshot class. + #### On kind You can test the operator locally on `kind` with: @@ -230,29 +223,6 @@ We have also provided a shortcut to this script in the main `Makefile`: make e2e-test-kind ``` -#### On k3d - -You can test the operator locally on `k3d` with: - -``` bash -run-e2e-k3d.sh -``` - -> **NOTE:** error messages, like the example below, that will be shown during -> cluster creation are **NOT** an issue: - -``` -Error response from daemon: manifest for rancher/k3s:v1.20.0-k3s5 not found: manifest unknown: manifest unknown -``` - -The script will take care of creating a K3d cluster and then run the tests on it. - -We have also provided a shortcut to this script in the main `Makefile`: - -```shell -make e2e-test-k3d -``` - #### On existing local cluster You can test the operator locally on `local` with: @@ -296,19 +266,25 @@ the following ones can be defined: * `LOG_DIR`: the directory where the container logs are exported. Default: `_logs/` directory in the project root -`run-e2e-kind.sh` forces `E2E_DEFAULT_STORAGE_CLASS=standard` while -`run-e2e-k3d.sh` forces `E2E_DEFAULT_STORAGE_CLASS=local-path` +`run-e2e-kind.sh` forces `E2E_DEFAULT_STORAGE_CLASS=standard`. -Both scripts use the `setup-cluster.sh` script, in order to initialize the cluster -choosing between `kind` or K3d engine. +The script uses the `setup-cluster.sh` script to initialize the cluster using +the `kind` engine. ### Running E2E tests on a fork of the repository -Additionally, if you fork the repository and want to run the tests on your fork, you can do so +**For maintainers and organization members:** If you fork the repository and want to run the tests on your fork, you can do so by running the `/test` command in a Pull Request opened in your forked repository. `/test` is used to trigger a run of the end-to-end tests in the GitHub Actions. Only users who have `write` permission to the repository can use this command. +**For external contributors:** You can run local e2e tests using: +- `FEATURE_TYPE=smoke,basic make e2e-test-kind` for smoke and basic tests +- `TEST_DEPTH=0 make e2e-test-kind` for critical tests only +- `TEST_DEPTH=1 make e2e-test-kind` for critical and high priority tests + +Maintainers will handle comprehensive cloud-based E2E testing during the pull request review process. 
+ Options supported are: - test_level (`level` or `tl` for short) diff --git a/contribute/lfx-mentorship-program.md b/contribute/lfx-mentorship-program.md new file mode 100644 index 0000000000..76327ccb5f --- /dev/null +++ b/contribute/lfx-mentorship-program.md @@ -0,0 +1,106 @@ +# LFX Mentorship Program + +CloudNativePG, as a CNCF project, proudly supports the +[LFX Mentorship Program](https://lfx.linuxfoundation.org/tools/mentorship/) +by the Linux Foundation. + +This page lists the accepted CloudNativePG mentorship projects and provides +resources for current mentees, prospective applicants, and contributors +interested in getting involved. + +Each mentorship project spans **12 weeks** and is designed to be a **full-time +learning opportunity**, requiring significant commitment and dedication from +mentees. + +*Note:* we use the +["LFX Mentorship" label](https://github.com/cloudnative-pg/cloudnative-pg/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22LFX%20Mentorship%22) +to classify issues and pull requests. + +--- + +## Current Mentorship Programs + +If you’re interested in applying for a future term, we recommend preparing in +the areas outlined below. + +| Year | Term | Project | Mentee | +| ---- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------- | +| 2025 | 3 (Sep–Nov) | [Chaos Testing](https://mentorship.lfx.linuxfoundation.org/project/0858ce07-0c90-47fa-a1a0-95c6762f00ff) | [Yash Agarwal](https://github.com/xploy04) | +| 2025 | 3 (Sep–Nov) | [Rebuild documentation for multi-version support with Docusaurus](https://mentorship.lfx.linuxfoundation.org/project/86a647c1-88c7-474f-b093-6abb58197083) | [Anushka Saxena](https://www.linkedin.com/in/-anushka-saxena/) | + +--- + +## Past Mentorship Programs + +| Year | Term | Project | Mentee | +| ---- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------- | +| 2025 | 2 (Jun–Aug) | [Declarative Management of PostgreSQL FDWs](https://mentorship.lfx.linuxfoundation.org/project/53fa853e-b5fa-4d68-be71-f005c75aea89) | [Ying Zhu](https://github.com/EdwinaZhu) | + +--- + +## Applying to the Mentorship + +We’re excited that you’re considering applying for the mentorship programme! To +give yourself the best chance, here are some tips to help you prepare a strong +application and move smoothly through the process: + +- **Start with a thoughtful CV and cover letter.** These are your first + opportunity to show who you are, what motivates you, and what you hope to + achieve during the mentorship. Take your time to make them clear, personal, and + well-structured. +- **Use AI wisely.** Tools can be great for polishing your writing or checking + grammar, but make sure your application reflects *you*. Mentors can usually + spot when something is fully generated. The mentorship is about your personal + growth over three months, so let your own voice shine through. +- **Apply through the official LFX portal.** To be considered, make sure you + complete the application process there and upload both your CV and cover + letter. +- **Follow the process in the upstream issue.** Any clarifications should be + asked directly in the issue. This keeps everything transparent and ensures + mentors don’t miss your questions. 
+- **Respect the communication boundaries.** Please avoid asking about the
+  status of your application in public chats or reaching out to individual
+  mentors on social media. We know it can feel hard to wait, but sticking to the
+  official process helps everyone.
+- **Understand the selection realities.** There are many projects, lots of
+  applicants, and only a short time for the selection process. That means not
+  everyone will be contacted directly. If you don’t hear back, don’t be
+  discouraged—keep applying and building your skills.
+
+We’re looking forward to learning more about you and wish you the very best of
+luck with your application!
+
+---
+
+## Recommended Preparation
+
+While each project has its own skill requirements, the program aims to deepen
+mentees’ knowledge in the following areas:
+
+- **Go programming**, with a focus on operator development
+- **Kubernetes** and **Custom Resource Definitions (CRDs)**
+- **Git** and **GitHub workflows**
+- **CloudNativePG architecture and usage**
+- **PostgreSQL fundamentals**
+
+We encourage aspiring mentees to begin familiarising themselves with these
+topics in preparation for upcoming application cycles.
+
+---
+
+### Suggested Resources
+
+Below are some key resources to support your learning journey:
+
+- [Kubebuilder Book](https://book.kubebuilder.io/)
+- [Programming Kubernetes](https://www.oreilly.com/library/view/programming-kubernetes/9781492047094/)
+
+---
+
+## Getting Started as a Mentee
+
+To hit the ground running, make sure you:
+
+- Join the [CNCF Slack channel](../README.md#communications) for CloudNativePG
+- [Set up your development environment](development_environment/README.md)
+- [Run E2E tests locally](e2e_testing_environment/README.md)
diff --git a/contribute/release-notes-template.md b/contribute/release-notes-template.md
new file mode 100644
index 0000000000..e09753a725
--- /dev/null
+++ b/contribute/release-notes-template.md
@@ -0,0 +1,70 @@
+
+# Release notes for CloudNativePG 1.XX
+
+History of user-visible changes in the 1.XX minor release of CloudNativePG.
+
+For a complete list of changes, please refer to the
+[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.XX)
+on the release branch in GitHub.
+
+## Version 1.XX.0-rc1
+
+**Release date:** Mon DD, 20YY
+
+### Important changes:
+
+- OPTIONAL
+- OPTIONAL
+
+### Features:
+
+- **MAIN FEATURE #1**: short description
+- **MAIN FEATURE #2**: short description
+
+### Enhancements:
+
+- Add ...
+- Introduce ...
+- Allow ...
+- Enhance ...
+- `cnpg` plugin updates:
+  - Enhance ...
+  - Add ...
+
+### Security:
+
+- Add ...
+- Improve ...
+
+### Fixes:
+
+- Enhance ...
+- Disable ...
+- Gracefully handle ...
+- Wait ...
+- Fix ...
+- Address ...
+- `cnpg` plugin:
+  - ...
+  - ...
+
+### Supported versions
+
+- Kubernetes 1.31, 1.30, and 1.29
+- PostgreSQL 17, 16, 15, 14, and 13
+  - PostgreSQL 17.X is the default image
+  - PostgreSQL 13 support ends on November 12, 2025
diff --git a/contribute/release_procedure.md b/contribute/release_procedure.md
index 0e708302e2..18dd0270bf 100644
--- a/contribute/release_procedure.md
+++ b/contribute/release_procedure.md
@@ -71,6 +71,8 @@ activities:
  update [`docs/src/release_notes.md`](../docs/src/release_notes.md)
  and [`.github/ISSUE_TEMPLATE/bug.yml`](../.github/ISSUE_TEMPLATE/bug.yml).
  These changes should go in a PR against `main`, and get maintainer approval.
+  Look at the template file to get an idea of how to start a new minor release
+  version document.
- **Capabilities page:** in case of a new minor release, ensure that the operator capability levels page in @@ -142,8 +144,10 @@ This procedure must happen immediately before starting the release. **IMPORTANT:** Now we add support for the automatic backporting of merged pull requests from main to the new release branch. Once the new release branch is created, go back to `main` and submit a pull request to update the -[backport](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/backport.yml) -and [continuous delivery](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-delivery.yml) +[backport](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/backport.yml), +[continuous delivery](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-delivery.yml), +[continuous integration](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-integration.yml) +and [Renovate](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/renovate.json5) workflows to support the new release branch. And also remember to update the [github issue template](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/ISSUE_TEMPLATE/bug.yml). diff --git a/docker-bake.hcl b/docker-bake.hcl new file mode 100644 index 0000000000..c30d3f2d6e --- /dev/null +++ b/docker-bake.hcl @@ -0,0 +1,152 @@ +# +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +variable "environment" { + default = "testing" + validation { + condition = contains(["testing", "production"], environment) + error_message = "environment must be either testing or production" + } +} + +variable "registry" { + default = "localhost:5000" +} + +variable "insecure" { + default = "false" +} + +variable "latest" { + default = "false" +} + +variable "tag" { + default = "dev" +} + +variable "buildVersion" { + default = "dev" +} + +variable "revision" { + default = "" +} + +suffix = (environment == "testing") ? "-testing" : "" + +title = "CloudNativePG Operator" +description = "This Docker image contains CloudNativePG Operator." 
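+# NOTE (illustrative only): each variable defined above can be overridden at
+# build time through an environment variable with the same name, for example
+# (registry and tag are example values, not project defaults):
+#
+#   environment=production registry=ghcr.io/example tag=1.27.0 docker buildx bake
+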
+authors = "The CloudNativePG Contributors" +url = "https://github.com/cloudnative-pg/cloudnative-pg" +documentation = "https://cloudnative-pg.io/documentation/current/" +license = "Apache-2.0" +now = timestamp() + +distros = { + distroless = { + baseImage = "gcr.io/distroless/static-debian12:nonroot@sha256:6ec5aa99dc335666e79dc64e4a6c8b89c33a543a1967f20d360922a80dd21f02", + tag = "" + } + ubi = { + baseImage = "registry.access.redhat.com/ubi9/ubi-micro:latest@sha256:7e85855f6925e03f91b5c51f07886ff1c18c6ec69b5fc65491428a899da914a2", + tag = "-ubi9" + } +} + +target "default" { + matrix = { + distro = [ + "distroless", + "ubi" + ] + } + + name = "${distro}" + platforms = ["linux/amd64", "linux/arm64"] + tags = [ + "${registry}/cloudnative-pg${suffix}:${tag}${distros[distro].tag}", + latest("${registry}/cloudnative-pg${suffix}", "${latest}"), + ] + + dockerfile = "Dockerfile" + + context = "." + + args = { + BASE = "${distros[distro].baseImage}" + } + + output = [ + "type=image,registry.insecure=${insecure}", + ] + + attest = [ + "type=provenance,mode=max", + "type=sbom" + ] + annotations = [ + "index,manifest:org.opencontainers.image.created=${now}", + "index,manifest:org.opencontainers.image.url=${url}", + "index,manifest:org.opencontainers.image.source=${url}", + "index,manifest:org.opencontainers.image.version=${buildVersion}", + "index,manifest:org.opencontainers.image.revision=${revision}", + "index,manifest:org.opencontainers.image.vendor=${authors}", + "index,manifest:org.opencontainers.image.title=${title}", + "index,manifest:org.opencontainers.image.description=${description}", + "index,manifest:org.opencontainers.image.documentation=${documentation}", + "index,manifest:org.opencontainers.image.authors=${authors}", + "index,manifest:org.opencontainers.image.licenses=${license}", + "index,manifest:org.opencontainers.image.base.name=${distros[distro].baseImage}", + "index,manifest:org.opencontainers.image.base.digest=${digest(distros[distro].baseImage)}", + ] + labels = { + "org.opencontainers.image.created" = "${now}", + "org.opencontainers.image.url" = "${url}", + "org.opencontainers.image.source" = "${url}", + "org.opencontainers.image.version" = "${buildVersion}", + "org.opencontainers.image.revision" = "${revision}", + "org.opencontainers.image.vendor" = "${authors}", + "org.opencontainers.image.title" = "${title}", + "org.opencontainers.image.description" = "${description}", + "org.opencontainers.image.documentation" = "${documentation}", + "org.opencontainers.image.authors" = "${authors}", + "org.opencontainers.image.licenses" = "${license}", + "org.opencontainers.image.base.name" = "${distros[distro].baseImage}", + "org.opencontainers.image.base.digest" = "${digest(distros[distro].baseImage)}", + "name" = "${title}", + "maintainer" = "${authors}", + "vendor" = "${authors}", + "version" = "${buildVersion}", + "release" = "1", + "description" = "${description}", + "summary" = "${description}", + } + +} + +function digest { + params = [ imageNameWithSha ] + result = index(split("@", imageNameWithSha), 1) +} + +function latest { + params = [ image, latest ] + result = (latest == "true") ? "${image}:latest" : "" +} diff --git a/docs/LICENSE b/docs/LICENSE new file mode 100644 index 0000000000..da6ab6cc8f --- /dev/null +++ b/docs/LICENSE @@ -0,0 +1,396 @@ +Attribution 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. 
Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. 
For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. 
indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. 
However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. 
+ diff --git a/docs/README.md b/docs/README.md index 692d45a2f3..cf74a9cd25 100644 --- a/docs/README.md +++ b/docs/README.md @@ -71,3 +71,10 @@ consider if they should be included in the curated [list of examples](src/sample And please help keeping the samples in the curated list, as well as any samples named `cluster-example-*` in runnable condition. These can be a big help for beginners. + +## License + +The CloudNativePG documentation and all the work under the `docs` folder is +licensed under a Creative Commons Attribution 4.0 International License. + + diff --git a/docs/config.yaml b/docs/config.yaml index cc5cde9174..94e4522c2a 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -1,5 +1,6 @@ hiddenMemberFields: - "TypeMeta" + - "synchronizeReplicasCache" externalPackages: - match: ^github\.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1\.RelabelConfig$ @@ -24,10 +25,22 @@ externalPackages: target: https://pkg.go.dev/time#Duration - match: ^k8s\.io/(api|apimachinery/pkg/apis)/ target: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#{{- lower .TypeIdentifier -}}-{{- arrIndex .PackageSegments -1 -}}-{{- arrIndex .PackageSegments -2 -}} + - match: ^github\.com/cloudnative-pg/machinery + target: https://pkg.go.dev/github.com/cloudnative-pg/machinery/pkg/api/#{{- .TypeIdentifier }} + - match: ^github\.com/cloudnative-pg/barman-cloud + target: https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api/#{{- .TypeIdentifier }} hideTypePatterns: - "ParseError$" - - "List$" + - "\\.BackupList$" + - "\\.ClusterList$" + - "\\.ClusterImageCatalogList$" + - "\\.DatabaseList$" + - "\\.ImageCatalogList$" + - "\\.PoolerList$" + - "\\.ScheduledBackupList$" + - "\\.PublicationList$" + - "\\.SubscriptionList$" markdownDisabled: false diff --git a/docs/markdown/pkg.tpl b/docs/markdown/pkg.tpl index 345d032ff9..ed4875592b 100644 --- a/docs/markdown/pkg.tpl +++ b/docs/markdown/pkg.tpl @@ -1,6 +1,6 @@ {{ define "packages" -}} - # API Reference + {{ $grpname := "" -}} {{- range $idx, $val := .packages -}} diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 8ea186759c..f510a314e2 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -1,8 +1,10 @@ site_name: CloudNativePG site_author: The CloudNativePG Contributors +copyright: Copyright © CloudNativePG a Series of LF Projects, LLC docs_dir: src -theme: readthedocs +theme: + name: readthedocs extra_css: - css/override.css @@ -11,6 +13,8 @@ markdown_extensions: - admonition - def_list - attr_list + - footnotes + - pymdownx.caret nav: - index.md @@ -29,14 +33,14 @@ nav: - failure_modes.md - rolling_update.md - replication.md + - logical_replication.md - backup.md - - backup_barmanobjectstore.md - wal_archiving.md - - backup_volumesnapshot.md - recovery.md - service_management.md - postgresql_conf.md - declarative_role_management.md + - declarative_database_management.md - tablespaces.md - operator_conf.md - cluster_conf.md @@ -50,6 +54,7 @@ nav: - connection_pooling.md - replica_cluster.md - kubernetes_upgrade.md + - postgres_upgrades.md - kubectl-plugin.md - failover.md - troubleshooting.md @@ -58,16 +63,22 @@ nav: - postgis.md - e2e.md - container_images.md + - imagevolume_extensions.md + - cnpg_i.md - operator_capability_levels.md - controller.md - samples.md - networking.md - benchmarking.md - - commercial_support.md - faq.md - cloudnative-pg.v1.md - supported_releases.md - preview_version.md - release_notes.md + - CNCF Projects Integrations: + - cncf-projects/external-secrets.md + - cncf-projects/cilium.md - 
Appendixes: + - appendixes/backup_volumesnapshot.md + - appendixes/backup_barmanobjectstore.md - appendixes/object_stores.md diff --git a/docs/src/backup_barmanobjectstore.md b/docs/src/appendixes/backup_barmanobjectstore.md similarity index 54% rename from docs/src/backup_barmanobjectstore.md rename to docs/src/appendixes/backup_barmanobjectstore.md index 34b907e0ae..8573315567 100644 --- a/docs/src/backup_barmanobjectstore.md +++ b/docs/src/appendixes/backup_barmanobjectstore.md @@ -1,4 +1,14 @@ -# Backup on object stores +# Appendix B - Backup on object stores + + + +!!! Warning + As of CloudNativePG 1.26, **native Barman Cloud support is deprecated** in + favor of the **Barman Cloud Plugin**. This page has been moved to the appendix + for reference purposes. While the native integration remains functional for + now, we strongly recommend beginning a gradual migration to the plugin-based + interface after appropriate testing. For guidance, see + [Migrating from Built-in CloudNativePG Backup](https://cloudnative-pg.io/plugin-barman-cloud/docs/migration/). CloudNativePG natively supports **online/hot backup** of PostgreSQL clusters through continuous physical backup and WAL archiving on an object @@ -27,23 +37,90 @@ as it is composed of a community PostgreSQL image and the latest A backup is performed from a primary or a designated primary instance in a `Cluster` (please refer to -[replica clusters](replica_cluster.md) +[replica clusters](../replica_cluster.md) for more information about designated primary instances), or alternatively -on a [standby](backup.md#backup-from-a-standby). +on a [standby](../backup.md#backup-from-a-standby). ## Common object stores If you are looking for a specific object store such as -[AWS S3](appendixes/object_stores.md#aws-s3), -[Microsoft Azure Blob Storage](appendixes/object_stores.md#azure-blob-storage), -[Google Cloud Storage](appendixes/object_stores.md#google-cloud-storage), or -[MinIO Gateway](appendixes/object_stores.md#minio-gateway), or a compatible -provider, please refer to [Appendix A - Common object stores](appendixes/object_stores.md). +[AWS S3](object_stores.md#aws-s3), +[Microsoft Azure Blob Storage](object_stores.md#azure-blob-storage), +[Google Cloud Storage](object_stores.md#google-cloud-storage), or a compatible +provider, please refer to [Appendix C - Common object stores for backups](object_stores.md). -## Retention policies +## WAL archiving + +WAL archiving is the process that feeds a [WAL archive](../backup.md#wal-archive) +in CloudNativePG. + +The WAL archive is defined in the `.spec.backup.barmanObjectStore` stanza of +a `Cluster` resource. + +!!! Info + Please refer to [`BarmanObjectStoreConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration) + in the barman-cloud API for a full list of options. + +If required, you can choose to compress WAL files as soon as they +are uploaded and/or encrypt them: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +[...] +spec: + backup: + barmanObjectStore: + [...] + wal: + compression: gzip + encryption: AES256 +``` + +You can configure the encryption directly in your bucket, and the operator +will use it unless you override it in the cluster configuration. + +PostgreSQL implements a sequential archiving scheme, where the +`archive_command` will be executed sequentially for every WAL +segment to be archived. !!! Important - Retention policies are not currently available on volume snapshots. 
+    By default, CloudNativePG sets `archive_timeout` to `5min`, ensuring
+    that WAL files, even in case of low workloads, are closed and archived
+    at least every 5 minutes, providing a deterministic time-based value for
+    your Recovery Point Objective ([RPO](../before_you_start.md#rpo)). Even though you can change the value
+    of the [`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT),
+    our experience suggests that the default value set by the operator is
+    suitable for most use cases.
+
+When the bandwidth between the PostgreSQL instance and the object
+store allows archiving more than one WAL file in parallel, you
+can use the parallel WAL archiving feature of the instance manager,
+as in the following example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+[...]
+spec:
+  backup:
+    barmanObjectStore:
+      [...]
+      wal:
+        compression: gzip
+        maxParallel: 8
+        encryption: AES256
+```
+
+In the previous example, the instance manager optimizes the WAL
+archiving process by archiving in parallel at most eight ready
+WALs, including the one requested by PostgreSQL.
+
+When PostgreSQL requests the archiving of a WAL that has
+already been archived by the instance manager as an optimization,
+that archival request is simply dismissed with a positive status.
+
+## Retention policies

CloudNativePG can manage the automated deletion of backup files from the
backup object store, using **retention policies** based on the recovery
@@ -93,12 +170,15 @@ algorithms via `barman-cloud-backup` (for backups) and

* bzip2
* gzip
+* lz4
* snappy
+* xz
+* zstd

The compression settings for backups and WALs are independent. See the
-[DataBackupConfiguration](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-DataBackupConfiguration) and
-[WALBackupConfiguration](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-WalBackupConfiguration) sections in
-the API reference.
+[DataBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration) and
+[WALBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#WalBackupConfiguration) sections in
+the barman-cloud API reference.

It is important to note that archival time, restore time, and size change
between the algorithms, so the compression algorithm should be chosen according
@@ -153,7 +233,7 @@ spec:

You can append additional options to the `barman-cloud-backup` and `barman-cloud-wal-archive` commands by using
the `additionalCommandArgs` property in the
`.spec.backup.barmanObjectStore.data` and `.spec.backup.barmanObjectStore.wal` sections respectively.
-This properties are lists of strings that will be appended to the
+These properties are lists of strings that will be appended to the
`barman-cloud-backup` and `barman-cloud-wal-archive` commands.

For example, you can use the `--read-timeout=60` to customize the connection
@@ -198,4 +278,62 @@ spec:
      additionalCommandArgs:
      - "--max-concurrency=1"
      - "--read-timeout=60"
-```
\ No newline at end of file
+```
+
+## Recovery from an object store
+
+You can recover from a backup created by Barman Cloud and stored on a supported
+object store. After you define the external cluster, including all the required
+configuration in the `barmanObjectStore` section, you need to reference it in
+the `.spec.recovery.source` option.
+ +This example defines a recovery object store in a blob container in Azure: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-restore +spec: + [...] + + superuserSecret: + name: superuser-secret + + bootstrap: + recovery: + source: clusterBackup + + externalClusters: + - name: clusterBackup + barmanObjectStore: + destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ + azureCredentials: + storageAccount: + name: recovery-object-store-secret + key: storage_account_name + storageKey: + name: recovery-object-store-secret + key: storage_account_key + wal: + maxParallel: 8 +``` + +The previous example assumes that the application database and its owning user +are named `app` by default. If the PostgreSQL cluster being restored uses +different names, you must specify these names before exiting the recovery phase, +as documented in ["Configure the application database"](../recovery.md#configure-the-application-database). + +!!! Important + By default, the `recovery` method strictly uses the `name` of the + cluster in the `externalClusters` section as the name of the main folder + of the backup data within the object store. This name is normally reserved + for the name of the server. You can specify a different folder name + using the `barmanObjectStore.serverName` property. + +!!! Note + This example takes advantage of the parallel WAL restore feature, + dedicating up to 8 jobs to concurrently fetch the required WAL files from the + archive. This feature can appreciably reduce the recovery time. Make sure that + you plan ahead for this scenario and correctly tune the value of this parameter + for your environment. It will make a difference when you need it, and you will. diff --git a/docs/src/backup_volumesnapshot.md b/docs/src/appendixes/backup_volumesnapshot.md similarity index 72% rename from docs/src/backup_volumesnapshot.md rename to docs/src/appendixes/backup_volumesnapshot.md index a927b01fa8..6bf555f49b 100644 --- a/docs/src/backup_volumesnapshot.md +++ b/docs/src/appendixes/backup_volumesnapshot.md @@ -1,11 +1,10 @@ -# Backup on volume snapshots +# Appendix A - Backup on volume snapshots + -!!! Warning - As noted in the [backup document](backup.md), a cold snapshot explicitly - set to target the primary will result in the primary being fenced for - the duration of the backup, rendering the cluster read-only during that - For safety, in a cluster already containing fenced instances, a cold - snapshot is rejected. +!!! Important + Please refer to the official Kubernetes documentation for a list of all + the supported [Container Storage Interface (CSI) drivers](https://kubernetes-csi.github.io/docs/drivers.html) + that provide snapshotting capabilities. CloudNativePG is one of the first known cases of database operators that directly leverages the Kubernetes native Volume Snapshot API for both @@ -52,7 +51,7 @@ volumes of a given storage class, and managed as `VolumeSnapshot` and !!! Important It is your responsibility to verify with the third party vendor that volume snapshots are supported. CloudNativePG only interacts - with the Kubernetes API on this matter and we cannot support issues + with the Kubernetes API on this matter, and we cannot support issues at the storage level for each specific CSI driver. ## How to configure Volume Snapshot backups @@ -61,7 +60,7 @@ CloudNativePG allows you to configure a given Postgres cluster for Volume Snapshot backups through the `backup.volumeSnapshot` stanza. !!! 
Info - Please refer to [`VolumeSnapshotConfiguration`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-VolumeSnapshotConfiguration) + Please refer to [`VolumeSnapshotConfiguration`](../cloudnative-pg.v1.md#postgresql-cnpg-io-v1-VolumeSnapshotConfiguration) in the API reference for a full list of options. A generic example with volume snapshots (assuming that PGDATA and WALs share @@ -86,18 +85,21 @@ spec: # Volume snapshot backups volumeSnapshot: className: @VOLUME_SNAPSHOT_CLASS_NAME@ - # WAL archive - barmanObjectStore: - # ... + + plugins: + - name: barman-cloud.cloudnative-pg.io + isWALArchiver: true + parameters: + barmanObjectName: @OBJECTSTORE_NAME@ ``` As you can see, the `backup` section contains both the `volumeSnapshot` stanza (controlling physical base backups on volume snapshots) and the -`barmanObjectStore` one (controlling the [WAL archive](wal_archiving.md)). +`plugins` one (controlling the [WAL archive](../wal_archiving.md)). !!! Info - Once you have defined the `barmanObjectStore`, you can decide to use - both volume snapshot and object store backup strategies simultaneously + Once you have defined the `plugin`, you can decide to use + both volume snapshot and plugin backup strategies simultaneously to take physical backups. The `volumeSnapshot.className` option allows you to reference the default @@ -115,6 +117,13 @@ a `ScheduledBackup` resource that requests such backups on a periodic basis. ## Hot and cold backups +!!! Warning + As noted in the [backup document](../backup.md), a cold snapshot explicitly + set to target the primary will result in the primary being fenced for + the duration of the backup, making the cluster read-only during this + period. For safety, in a cluster already containing fenced instances, a cold + snapshot is rejected. + By default, CloudNativePG requests an online/hot backup on volume snapshots, using the [PostgreSQL defaults of the low-level API for base backups](https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-LOWLEVEL-BASE-BACKUP): @@ -242,7 +251,95 @@ referenced in the `.spec.backup.volumeSnapshot.className` option. Please refer to the [Kubernetes documentation on Volume Snapshot Classes](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/) for details on this standard behavior. -## Example +## Backup Volume Snapshot Deadlines + +CloudNativePG supports backups using the volume snapshot method. In some +environments, volume snapshots may encounter temporary issues that can be +retried. + +The `backup.cnpg.io/volumeSnapshotDeadline` annotation defines how long +CloudNativePG should continue retrying recoverable errors before marking the +backup as failed. + +You can add the `backup.cnpg.io/volumeSnapshotDeadline` annotation to both +`Backup` and `ScheduledBackup` resources. For `ScheduledBackup` resources, this +annotation is automatically inherited by any `Backup` resources created from +the schedule. + +If not specified, the default retry deadline is **10 minutes**. + +### Error Handling + +When a retryable error occurs during a volume snapshot operation: + +1. CloudNativePG records the time of the first error. +2. The system retries the operation every **10 seconds**. +3. If the error persists beyond the specified deadline (or the default 10 + minutes), the backup is marked as **failed**. 
+ +### Retryable Errors + +CloudNativePG treats the following types of errors as retryable: + +- **Server timeout errors** (HTTP 408, 429, 500, 502, 503, 504) +- **Conflicts** (optimistic locking errors) +- **Internal errors** +- **Context deadline exceeded errors** +- **Timeout errors from the CSI snapshot controller** + +### Examples + +You can add the annotation to a `ScheduledBackup` resource as follows: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: daily-backup-schedule + annotations: + backup.cnpg.io/volumeSnapshotDeadline: "20" +spec: + schedule: "0 0 * * *" + backupOwnerReference: self + method: volumeSnapshot + # other configuration... +``` + +When you define a `ScheduledBackup` with the annotation, any `Backup` resources +created from this schedule automatically inherit the specified timeout value. + +In the following example, all backups created from the schedule will have a +30-minute timeout for retrying recoverable snapshot errors. + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: weekly-backup + annotations: + backup.cnpg.io/volumeSnapshotDeadline: "30" +spec: + schedule: "0 0 * * 0" # Weekly backup on Sunday + method: volumeSnapshot + cluster: + name: my-postgresql-cluster +``` + +Alternatively, you can add the annotation directly to a `Backup` Resource: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: my-backup + annotations: + backup.cnpg.io/volumeSnapshotDeadline: "15" +spec: + method: volumeSnapshot + # other backup configuration... +``` + +## Example of Volume Snapshot Backup The following example shows how to configure volume snapshot base backups on an EKS cluster on AWS using the `ebs-sc` storage class and the `csi-aws-vsc` @@ -256,7 +353,7 @@ volume snapshot class. The following manifest creates a `Cluster` that is ready to be used for volume snapshots and that stores the WAL archive in a S3 bucket via IAM role for the -Service Account (IRSA, see [AWS S3](appendixes/object_stores.md#aws-s3)): +Service Account (IRSA, see [AWS S3](object_stores.md#aws-s3)): ``` yaml apiVersion: postgresql.cnpg.io/v1 @@ -276,13 +373,12 @@ spec: backup: volumeSnapshot: className: csi-aws-vsc - barmanObjectStore: - destinationPath: s3://@BUCKET_NAME@/ - s3Credentials: - inheritFromIAMRole: true - wal: - compression: gzip - maxParallel: 2 + + plugins: + - name: barman-cloud.cloudnative-pg.io + isWALArchiver: true + parameters: + barmanObjectName: @OBJECTSTORE_NAME@ serviceAccountTemplate: metadata: diff --git a/docs/src/appendixes/object_stores.md b/docs/src/appendixes/object_stores.md index 991b8fa14e..73e424abb9 100644 --- a/docs/src/appendixes/object_stores.md +++ b/docs/src/appendixes/object_stores.md @@ -1,4 +1,13 @@ -# Appendix A - Common object stores for backups +# Appendix C - Common object stores for backups + + +!!! Warning + As of CloudNativePG 1.26, **native Barman Cloud support is deprecated** in + favor of the **Barman Cloud Plugin**. While the native integration remains + functional for now, we strongly recommend beginning a gradual migration to + the plugin-based interface after appropriate testing. The Barman Cloud + Plugin documentation describes + [how to use common object stores](https://cloudnative-pg.io/plugin-barman-cloud/docs/object_stores/). You can store the [backup](../backup.md) files in any service that is supported by the Barman Cloud infrastructure. 
That is:

@@ -129,6 +138,7 @@ spec:
In case you're using **Digital Ocean Spaces**, you will have to use the Path-style syntax.
In this example, it will use the `bucket` from **Digital Ocean Spaces** in the region `SFO3`.
+
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
@@ -142,10 +152,31 @@ spec:
    [...]
```

-!!! Important
-    Suppose you configure an Object Storage provider which uses a certificate signed with a private CA,
-    like when using MinIO via HTTPS. In that case, you need to set the option `endpointCA`
-    referring to a secret containing the CA bundle so that Barman can verify the certificate correctly.
+### Using Object Storage with a private CA
+
+Suppose you configure an Object Storage provider which uses a certificate
+signed with a private CA, for example when using MinIO via HTTPS. In that case,
+you need to set the option `endpointCA` inside `barmanObjectStore` referring
+to a secret containing the CA bundle, so that Barman can verify the certificate
+correctly.
+You can find instructions on creating a secret using your cert files in the
+[certificates](../certificates.md#example) document.
+Once you have created the secret, you can populate the `endpointCA` as in the
+following example:
+
+``` yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+[...]
+spec:
+  [...]
+  backup:
+    barmanObjectStore:
+      endpointURL: <myEndpointURL>
+      endpointCA:
+        name: my-ca-secret
+        key: ca.crt
+```

!!! Note
    If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can
@@ -186,7 +217,7 @@ On the other side, using both **Storage account access key** or **Storage accoun
the credentials need to be stored inside a Kubernetes Secret, adding data entries only when needed.
The following command performs that:

-```
+``` sh
kubectl create secret generic azure-creds \
  --from-literal=AZURE_STORAGE_ACCOUNT=<storage account name> \
  --from-literal=AZURE_STORAGE_KEY=<storage account key> \
@@ -226,7 +257,7 @@ spec:
When using the Azure Blob Storage, the `destinationPath` fulfills the following
structure:

-```
+``` sh
<http|https>://<account-name>.<service-name>.core.windows.net/<resource-path>
```

which is also called **storage account name**, is included in the used host name
If you are using a different implementation of the Azure Blob Storage APIs, the
`destinationPath` will have the following structure:

-```
+``` sh
<http|https>://<local-machine-address>:<port>/<account-name>/<resource-path>
```

@@ -266,7 +297,6 @@ without having to set any credentials. In particular, you need to:

Please use the following example as a reference:

-
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
@@ -320,125 +350,3 @@ Now the operator will use the credentials to authenticate against Google Cloud S

This way of authentication will create a JSON file inside the container with all the needed
information to access your Google Cloud Storage bucket, meaning that if someone gets access to the pod
will also have write permissions to the bucket.
-
-## MinIO Gateway
-
-Optionally, you can use MinIO Gateway as a common interface which
-relays backup objects to other cloud storage solutions, like S3 or GCS.
-For more information, please refer to [MinIO official documentation](https://docs.min.io/).
-
-Specifically, the CloudNativePG cluster can directly point to a local
-MinIO Gateway as an endpoint, using previously created credentials and service.
-
-MinIO secrets will be used by both the PostgreSQL cluster and the MinIO instance.
-Therefore, you must create them in the same namespace:
-
-```sh
-kubectl create secret generic minio-creds \
-  --from-literal=MINIO_ACCESS_KEY=<minio access key> \
-  --from-literal=MINIO_SECRET_KEY=<minio secret key>
-```
-
-!!!
Note - Cloud Object Storage credentials will be used only by MinIO Gateway in this case. - -!!! Important - In order to allow PostgreSQL to reach MinIO Gateway, it is necessary to create a - `ClusterIP` service on port `9000` bound to the MinIO Gateway instance. - -For example: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: minio-gateway-service -spec: - type: ClusterIP - ports: - - port: 9000 - targetPort: 9000 - protocol: TCP - selector: - app: minio -``` - -!!! Warning - At the time of writing this documentation, the official - [MinIO Operator](https://github.com/minio/minio-operator/issues/71) - for Kubernetes does not support the gateway feature. As such, we will use a - `deployment` instead. - -The MinIO deployment will use cloud storage credentials to upload objects to the -remote bucket and relay backup files to different locations. - -Here is an example using AWS S3 as Cloud Object Storage: - -```yaml -apiVersion: apps/v1 -kind: Deployment -[...] -spec: - containers: - - name: minio - image: minio/minio:RELEASE.2020-06-03T22-13-49Z - args: - - gateway - - s3 - env: - # MinIO access key and secret key - - name: MINIO_ACCESS_KEY - valueFrom: - secretKeyRef: - name: minio-creds - key: MINIO_ACCESS_KEY - - name: MINIO_SECRET_KEY - valueFrom: - secretKeyRef: - name: minio-creds - key: MINIO_SECRET_KEY - # AWS credentials - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: aws-creds - key: ACCESS_KEY_ID - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: aws-creds - key: ACCESS_SECRET_KEY -# Uncomment the below section if session token is required -# - name: AWS_SESSION_TOKEN -# valueFrom: -# secretKeyRef: -# name: aws-creds -# key: ACCESS_SESSION_TOKEN - ports: - - containerPort: 9000 -``` - -Proceed by configuring MinIO Gateway service as the `endpointURL` in the `Cluster` -definition, then choose a bucket name to replace `BUCKET_NAME`: - -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - destinationPath: s3://BUCKET_NAME/ - endpointURL: http://minio-gateway-service:9000 - s3Credentials: - accessKeyId: - name: minio-creds - key: MINIO_ACCESS_KEY - secretAccessKey: - name: minio-creds - key: MINIO_SECRET_KEY - [...] -``` - -Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before -proceeding with a backup. diff --git a/docs/src/applications.md b/docs/src/applications.md index fea987587b..f2a7f8dc06 100644 --- a/docs/src/applications.md +++ b/docs/src/applications.md @@ -1,4 +1,5 @@ # Connecting from an application + Applications are supposed to work with the services created by CloudNativePG in the same Kubernetes cluster. @@ -71,6 +72,13 @@ Each secret contain the following: * a working [`.pgpass file`](https://www.postgresql.org/docs/current/libpq-pgpass.html) * [uri](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) * [jdbc-uri](https://jdbc.postgresql.org/documentation/use/#connecting-to-the-database) +* [fqdn-uri](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) +* [fqdn-jdbc-uri](https://jdbc.postgresql.org/documentation/use/#connecting-to-the-database) + +The FQDN to be used in the URIs is calculated using the Kubernetes cluster +domain specified in the `KUBERNETES_CLUSTER_DOMAIN` configuration parameter. +See [the operator configuration documentation](operator_conf.md) for more information +about that. 
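As an illustrative sketch, assuming a cluster named `cluster-example` in the `default` namespace with the default `cluster.local` domain, the FQDN-based URI can be read from the application secret generated by the operator (assumed here to be `cluster-example-app`):

```shell
# Hypothetical example: decode the fqdn-uri key from the application
# user secret; adapt the secret name and namespace to your deployment.
kubectl get secret cluster-example-app \
  -o jsonpath='{.data.fqdn-uri}' | base64 -d
# Expected shape (password elided):
# postgresql://app:<password>@cluster-example-rw.default.svc.cluster.local:5432/app
```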
The `-app` credentials are the ones that should be used by applications connecting to the PostgreSQL cluster, and correspond to the user *owning* the @@ -81,4 +89,3 @@ and correspond to the `postgres` user. !!! Important Superuser access over the network is disabled by default. - diff --git a/docs/src/architecture.md b/docs/src/architecture.md index 43f1244343..486ddd538e 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -1,4 +1,5 @@ # Architecture + !!! Hint For a deeper understanding, we recommend reading our article on the CNCF @@ -135,7 +136,7 @@ the [replica cluster feature](replica_cluster.md)). ![Example of a Kubernetes architecture with only 2 data centers](./images/k8s-architecture-2-az.png) !!! Hint - If you are at en early stage of your Kubernetes journey, please share this + If you are at an early stage of your Kubernetes journey, please share this document with your infrastructure team. The two data centers setup might be simply the result of a "lift-and-shift" transition to Kubernetes from a traditional bare-metal or VM based infrastructure, and the benefits @@ -235,7 +236,7 @@ CloudNativePG recommends using the `node-role.kubernetes.io/postgres` taint. To assign the `postgres` taint to a node, use the following command: ```sh -kubectl taint node node-role.kubernetes.io/postgres=:noSchedule +kubectl taint node node-role.kubernetes.io/postgres=:NoSchedule ``` To ensure that a `Cluster` resource is scheduled on a node with a `postgres` taint, you must correctly configure the `.spec.affinity.tolerations` stanza in your manifests. @@ -354,11 +355,12 @@ only write inside a single Kubernetes cluster, at any time. However, for business continuity objectives it is fundamental to: -- reduce global **recovery point objectives** (RPO) by storing PostgreSQL backup data - in multiple locations, regions and possibly using different providers - (Disaster Recovery) -- reduce global **recovery time objectives** (RTO) by taking advantage of PostgreSQL - replication beyond the primary Kubernetes cluster (High Availability) +- reduce global **recovery point objectives** ([RPO](before_you_start.md#rpo)) + by storing PostgreSQL backup data in multiple locations, regions and possibly + using different providers (Disaster Recovery) +- reduce global **recovery time objectives** ([RTO](before_you_start.md#rto)) + by taking advantage of PostgreSQL replication beyond the primary Kubernetes + cluster (High Availability) In order to address the above concerns, CloudNativePG introduces the concept of a PostgreSQL Topology that is distributed across different Kubernetes clusters @@ -409,9 +411,10 @@ This is typically triggered by: declarative configuration, enabling you to automate these procedures as part of your Infrastructure as Code (IaC) process, including GitOps. -The designated primary in the above example is fed via WAL streaming -(`primary_conninfo`), with fallback option for file-based WAL shipping through -the `restore_command` and `barman-cloud-wal-restore`. +In the example above, the designated primary receives WAL updates via streaming +replication (`primary_conninfo`). As a fallback, it can retrieve WAL segments +from an object store using file-based WAL shipping—for instance, with the +Barman Cloud plugin through `restore_command` and `barman-cloud-wal-restore`. CloudNativePG allows you to define topologies with multiple replica clusters. 
You can also define replica clusters with a lower number of replicas, and then diff --git a/docs/src/backup.md b/docs/src/backup.md index fac42c56f9..99ee3f5e08 100644 --- a/docs/src/backup.md +++ b/docs/src/backup.md @@ -1,4 +1,42 @@ # Backup + + +!!! Info + This section covers **physical backups** in PostgreSQL. + While PostgreSQL also supports logical backups using the `pg_dump` utility, + these are **not suitable for business continuity** and are **not managed** by + CloudNativePG. If you still wish to use `pg_dump`, refer to the + [*Troubleshooting / Emergency backup* section](troubleshooting.md#emergency-backup) + for guidance. + +!!! Important + Starting with version 1.26, native backup and recovery capabilities are + being **progressively phased out** of the core operator and moved to official + CNPG-I plugins. This transition aligns with CloudNativePG's shift towards a + **backup-agnostic architecture**, enabled by its extensible + interface—**CNPG-I**—which standardizes the management of **WAL archiving**, + **physical base backups**, and corresponding **recovery processes**. + +CloudNativePG currently supports **physical backups of PostgreSQL clusters** in +two main ways: + +- **Via [CNPG-I](https://github.com/cloudnative-pg/cnpg-i/) plugins**: the + CloudNativePG Community officially supports the [**Barman Cloud Plugin**](https://cloudnative-pg.io/plugin-barman-cloud/) + for integration with object storage services. + +- **Natively**, with support for: + + - [Object storage via Barman Cloud](appendixes/backup_barmanobjectstore.md) + *(although deprecated from 1.26 in favor of the Barman Cloud Plugin)* + - [Kubernetes Volume Snapshots](appendixes/backup_volumesnapshot.md), if + supported by the underlying storage class + +Before selecting a backup strategy with CloudNativePG, it's important to +familiarize yourself with the foundational concepts covered in the ["Main Concepts"](#main-concepts) +section. These include WAL archiving, hot and cold backups, performing backups +from a standby, and more. + +## Main Concepts PostgreSQL natively provides first class backup and recovery capabilities based on file system level (physical) copy. These have been successfully used for @@ -6,14 +44,6 @@ more than 15 years in mission critical production databases, helping organizations all over the world achieve their disaster recovery goals with Postgres. -!!! Note - There's another way to backup databases in PostgreSQL, through the - `pg_dump` utility - which relies on logical backups instead of physical ones. - However, logical backups are not suitable for business continuity use cases - and as such are not covered by CloudNativePG (yet, at least). - If you want to use the `pg_dump` utility, let yourself be inspired by the - ["Troubleshooting / Emergency backup" section](troubleshooting.md#emergency-backup). - In CloudNativePG, the backup infrastructure for each PostgreSQL cluster is made up of the following resources: @@ -22,26 +52,11 @@ up of the following resources: - **Physical base backups**: a copy of all the files that PostgreSQL uses to store the data in the database (primarily the `PGDATA` and any tablespace) -The WAL archive can only be stored on object stores at the moment. - -On the other hand, CloudNativePG supports two ways to store physical base backups: - -- on [object stores](backup_barmanobjectstore.md), as tarballs - optionally - compressed -- on [Kubernetes Volume Snapshots](backup_volumesnapshot.md), if supported by - the underlying storage class - -!!! 
Important
-    Please refer to the official Kubernetes documentation for a list of all
-    the supported [Container Storage Interface (CSI) drivers](https://kubernetes-csi.github.io/docs/drivers.html)
-    that provide snapshotting capabilities.
+CNPG-I provides a generic and extensible interface for managing WAL archiving
+(both archive and restore operations), as well as the base backup and
+corresponding restore processes.
 
-## WAL archive
+### WAL archive
 
 The WAL archive in PostgreSQL is at the heart of **continuous backup**, and it
 is fundamental for the following reasons:
 
 - **Hot backups**: the possibility to take physical base backups from any
   instance in the Postgres cluster (either primary or standby) without shutting
   down the server; they are also known as online backups
-- **Point in Time recovery** (PITR): to possibility to recover at any point in
+- **Point in Time recovery** (PITR): the possibility to recover at any point in
   time from the first available base backup in your system
 
 !!! Warning
@@ -66,24 +81,25 @@ as they can simply rely on the WAL archive to synchronize across long
 distances, extending disaster recovery goals across different regions.
 
 When you [configure a WAL archive](wal_archiving.md), CloudNativePG provides
-out-of-the-box an RPO <= 5 minutes for disaster recovery, even across regions.
+out-of-the-box an [RPO](before_you_start.md#rpo) <= 5 minutes for disaster
+recovery, even across regions.
 
 !!! Important
     Our recommendation is to always setup the WAL archive in production.
-    There are known use cases - normally involving staging and development
-    environments - where none of the above benefits are needed and the WAL
+    There are known use cases — normally involving staging and development
+    environments — where none of the above benefits are needed and the WAL
    archive is not necessary. RPO in this case can be any value, such as
    24 hours (daily backups) or infinite (no backup at all).
 
-## Cold and Hot backups
+### Cold and Hot backups
 
 Hot backups have already been defined in the previous section. They require the
-presence of a WAL archive and they are the norm in any modern database management
-system.
+presence of a WAL archive, and they are the norm in any modern database
+management system.
 
 **Cold backups**, also known as offline backups, are instead physical base backups
 taken when the PostgreSQL instance (standby or primary) is shut down. They are
-consistent per definition and they represent a snapshot of the database at the
+consistent by definition, and they represent a snapshot of the database at the
 time it was shut down.
 
 As a result, PostgreSQL instances can be restarted from a cold backup without
@@ -95,78 +111,107 @@ In those situations with a higher RPO (for example, 1 hour or 24 hours), and
 shorter retention periods, cold backups represent a viable option to be
 considered for your disaster recovery plans.
 
-## Object stores or volume snapshots: which one to use?
+## Comparing Available Backup Options: Object Stores vs Volume Snapshots + +CloudNativePG currently supports two main approaches for physical backups: + +- **Object store–based backups**, via the [**Barman Cloud + Plugin**](https://cloudnative-pg.io/plugin-barman-cloud/) or the + [**deprecated native integration**](appendixes/backup_barmanobjectstore.md) +- [**Volume Snapshots**](appendixes/backup_volumesnapshot.md), using the + Kubernetes CSI interface and supported storage classes + +!!! Important + CNPG-I is designed to enable third parties to build and integrate their own + backup plugins. Over time, we expect the ecosystem of supported backup + solutions to grow. + +### Object Store–Based Backups + +Backups to an object store (e.g. AWS S3, Azure Blob, GCS): -In CloudNativePG, object store based backups: +- Always require WAL archiving +- Support hot backups only +- Do not support incremental or differential copies +- Support retention policies -- always require the WAL archive -- support hot backup only -- don't support incremental copy -- don't support differential copy +### Volume Snapshots -VolumeSnapshots instead: +Native volume snapshots: -- don't require the WAL archive, although in production it is always recommended -- support incremental copy, depending on the underlying storage classes -- support differential copy, depending on the underlying storage classes -- also support cold backup +- Do not require WAL archiving, though its use is still strongly + recommended in production +- Support incremental and differential copies, depending on the + capabilities of the underlying storage class +- Support both hot and cold backups +- Do not support retention policies -Which one to use depends on your specific requirements and environment, -including: +### Choosing Between the Two -- availability of a viable object store solution in your Kubernetes cluster -- availability of a trusted storage class that supports volume snapshots -- size of the database: with object stores, the larger your database, the - longer backup and, most importantly, recovery procedures take (the latter - impacts RTO); in presence of Very Large Databases (VLDB), the general - advice is to rely on Volume Snapshots as, thanks to copy-on-write, they - provide faster recovery -- data mobility and possibility to store or relay backup files on a - secondary location in a different region, or any subsequent one -- other factors, mostly based on the confidence and familiarity with the - underlying storage solutions +The best approach depends on your environment and operational requirements. +Consider the following factors: -The summary table below highlights some of the main differences between the two -available methods for storing physical base backups. +- **Object store availability**: Ensure your Kubernetes cluster can access a + reliable object storage solution, including a stable networking layer. +- **Storage class capabilities**: Confirm that your storage class supports CSI + volume snapshots with incremental/differential features. +- **Database size**: For very large databases (VLDBs), **volume snapshots are + generally preferred** as they enable faster recovery due to copy-on-write + technology—this significantly improves your + [Recovery Time Objective (RTO)](before_you_start.md#rto). +- **Data mobility**: Object store–based backups may offer greater flexibility + for replicating or storing backups across regions or environments. 
+- **Operational familiarity**: Choose the method that aligns best with your + team's experience and confidence in managing storage. -| | Object store | Volume Snapshots | +### Comparison Summary + +| Feature | Object Store | Volume Snapshots | |-----------------------------------|:------------:|:--------------------:| -| **WAL archiving** | Required | Recommended (1) | -| **Cold backup** | ✗ | ✓ | -| **Hot backup** | ✓ | ✓ | -| **Incremental copy** | ✗ | ✓ (2) | -| **Differential copy** | ✗ | ✓ (2) | -| **Backup from a standby** | ✓ | ✓ | -| **Snapshot recovery** | ✗ (3) | ✓ | -| **Point In Time Recovery (PITR)** | ✓ | Requires WAL archive | -| **Underlying technology** | Barman Cloud | Kubernetes API | - - -> See the explanation below for the notes in the above table: +| **WAL archiving** | Required | Recommended^1^ | +| **Cold backup** | ❌ | ✅ | +| **Hot backup** | ✅ | ✅ | +| **Incremental copy** | ❌ | ✅^2^ | +| **Differential copy** | ❌ | ✅^2^ | +| **Backup from a standby** | ✅ | ✅ | +| **Snapshot recovery** | ❌^3^ | ✅ | +| **Retention policies** | ✅ | ❌ | +| **Point-in-Time Recovery (PITR)** | ✅ | Requires WAL archive | +| **Underlying technology** | Barman Cloud | Kubernetes API | + +--- + +> **Notes:** > -> 1. WAL archive must be on an object store at the moment -> 2. If supported by the underlying storage classes of the PostgreSQL volumes -> 3. Snapshot recovery can be emulated using the -> `bootstrap.recovery.recoveryTarget.targetImmediate` option +> 1. WAL archiving must currently use an object store through a plugin (or the +> deprecated native one). +> 2. Availability of incremental and differential copies depends on the +> capabilities of the storage class used for PostgreSQL volumes. +> 3. Snapshot recovery can be emulated by using the +> `bootstrap.recovery.recoveryTarget.targetImmediate` option. -## Scheduled backups +## Scheduled Backups -Scheduled backups are the recommended way to configure your backup strategy in -CloudNativePG. They are managed by the `ScheduledBackup` resource. +Scheduled backups are the recommended way to implement a reliable backup +strategy in CloudNativePG. They are defined using the `ScheduledBackup` custom +resource. !!! Info - Please refer to [`ScheduledBackupSpec`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ScheduledBackupSpec) - in the API reference for a full list of options. + For a complete list of configuration options, refer to the + [`ScheduledBackupSpec`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ScheduledBackupSpec) + in the API reference. + +### Cron Schedule -The `schedule` field allows you to define a *six-term cron schedule* specification, -which includes seconds, as expressed in -the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format). +The `schedule` field defines **when** the backup should occur, using a +*six-field cron expression* that includes seconds. This format follows the +[Go `cron` package specification](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format). !!! Warning - Beware that this format accepts also the `seconds` field, and it is - different from the `crontab` format in Unix/Linux systems. + This format differs from the traditional Unix/Linux `crontab`—it includes a + **seconds** field as the first entry. 
-This is an example of a scheduled backup: +Example of a daily scheduled backup: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -174,62 +219,73 @@ kind: ScheduledBackup metadata: name: backup-example spec: - schedule: "0 0 0 * * *" + schedule: "0 0 0 * * *" # At midnight every day backupOwnerReference: self cluster: name: pg-backup + # method: plugin, volumeSnapshot, or barmanObjectStore (default) ``` -The above example will schedule a backup every day at midnight because the schedule -specifies zero for the second, minute, and hour, while specifying wildcard, meaning all, -for day of the month, month, and day of the week. +The schedule `"0 0 0 * * *"` triggers a backup every day at midnight +(00:00:00). In Kubernetes CronJobs, the equivalent expression would be `0 0 * * *`, +since seconds are not supported. -In Kubernetes CronJobs, the equivalent expression is `0 0 * * *` because seconds -are not included. +### Backup Frequency and RTO !!! Hint - Backup frequency might impact your recovery time object (RTO) after a - disaster which requires a full or Point-In-Time recovery operation. Our - advice is that you regularly test your backups by recovering them, and then - measuring the time it takes to recover from scratch so that you can refine - your RTO predictability. Recovery time is influenced by the size of the - base backup and the amount of WAL files that need to be fetched from the archive - and replayed during recovery (remember that WAL archiving is what enables - continuous backup in PostgreSQL!). - Based on our experience, a weekly base backup is more than enough for most - cases - while it is extremely rare to schedule backups more frequently than once - a day. - -You can choose whether to schedule a backup on a defined object store or a -volume snapshot via the `.spec.method` attribute, by default set to -`barmanObjectStore`. If you have properly defined -[volume snapshots](backup_volumesnapshot.md#how-to-configure-volume-snapshot-backups) -in the `backup` stanza of the cluster, you can set `method: volumeSnapshot` -to start scheduling base backups on volume snapshots. - -ScheduledBackups can be suspended, if needed, by setting `.spec.suspend: true`. -This will stop any new backup from being scheduled until the option is removed -or set back to `false`. - -In case you want to issue a backup as soon as the ScheduledBackup resource is created -you can set `.spec.immediate: true`. + The frequency of your backups directly impacts your **Recovery Time Objective** + ([RTO](before_you_start.md#rto)). -!!! Note - `.spec.backupOwnerReference` indicates which ownerReference should be put inside - the created backup resources. +To optimize your disaster recovery strategy based on continuous backup: + +- Regularly test restoring from your backups. +- Measure the time required for a full recovery. +- Account for the size of base backups and the number of WAL files that must be + retrieved and replayed. + +In most cases, a **weekly base backup** is sufficient. It is rare to schedule +full backups more frequently than once per day. 
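+
+Building on this advice, the following is a minimal sketch of a weekly
+`ScheduledBackup` (the resource name and target cluster are placeholders),
+reusing the six-field cron format described above:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: ScheduledBackup
+metadata:
+  name: backup-weekly
+spec:
+  # Six fields: seconds, minutes, hours, day of month, month, day of week
+  schedule: "0 0 0 * * 0"  # every Sunday at midnight (00:00:00)
+  cluster:
+    name: pg-backup
+```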
+ +### Immediate Backup + +To trigger a backup immediately when the `ScheduledBackup` is created: + +```yaml +spec: + immediate: true +``` + +### Pause Scheduled Backups + +To temporarily stop scheduled backups from running: + +```yaml +spec: + suspend: true +``` + +### Backup Owner Reference (`.spec.backupOwnerReference`) - - *none:* no owner reference for created backup objects (same behavior as before the field was introduced) - - *self:* sets the Scheduled backup object as owner of the backup - - *cluster:* set the cluster as owner of the backup +Controls which Kubernetes object is set as the owner of the backup resource: -## On-demand backups +- `none`: No owner reference (legacy behavior) +- `self`: The `ScheduledBackup` object becomes the owner +- `cluster`: The PostgreSQL cluster becomes the owner + +## On-Demand Backups + +On-demand backups allow you to manually trigger a backup operation at any time +by creating a `Backup` resource. !!! Info - Please refer to [`BackupSpec`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BackupSpec) - in the API reference for a full list of options. + For a full list of available options, see the + [`BackupSpec`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BackupSpec) in the + API reference. + +### Example: Requesting an On-Demand Backup -To request a new backup, you need to create a new `Backup` resource -like the following one: +To start an on-demand backup, apply a `Backup` request custom resource like the +following: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -242,20 +298,23 @@ spec: name: pg-backup ``` -In this case, the operator will start to orchestrate the cluster to take the -required backup on an object store, using `barman-cloud-backup`. You can check -the backup status using the plain `kubectl describe backup ` command: +In this example, the operator will orchestrate the backup process using the +`barman-cloud-backup` tool and store the backup in the configured object store. + +### Monitoring Backup Progress + +You can check the status of the backup using: + +```bash +kubectl describe backup backup-example +``` + +While the backup is in progress, you'll see output similar to: ```text Name: backup-example Namespace: default -Labels: -Annotations: API Version: postgresql.cnpg.io/v1 -Kind: Backup -Metadata: - Creation Timestamp: 2020-10-26T13:57:40Z - Self Link: /apis/postgresql.cnpg.io/v1/namespaces/default/backups/backup-example - UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 +... Spec: Cluster: Name: pg-backup @@ -265,59 +324,95 @@ Status: Events: ``` -When the backup has been completed, the phase will be `completed` -like in the following example: +Once the backup has successfully completed, the `phase` will be set to +`completed`, and the output will include additional metadata: ```text Name: backup-example Namespace: default -Labels: -Annotations: API Version: postgresql.cnpg.io/v1 -Kind: Backup -Metadata: - Creation Timestamp: 2020-10-26T13:57:40Z - Self Link: /apis/postgresql.cnpg.io/v1/namespaces/default/backups/backup-example - UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 -Spec: - Cluster: - Name: pg-backup +... 
Status: Backup Id: 20201026T135740 Destination Path: s3://backups/ Endpoint URL: http://minio:9000 Phase: completed - s3Credentials: + S3 Credentials: Access Key Id: - Key: ACCESS_KEY_ID Name: minio + Key: ACCESS_KEY_ID Secret Access Key: - Key: ACCESS_SECRET_KEY - Name: minio - Server Name: pg-backup - Started At: 2020-10-26T13:57:40Z - Stopped At: 2020-10-26T13:57:44Z -Events: + Name: minio + Key: ACCESS_SECRET_KEY + Server Name: pg-backup + Started At: 2020-10-26T13:57:40Z + Stopped At: 2020-10-26T13:57:44Z ``` -!!!Important - This feature will not backup the secrets for the superuser and the - application user. The secrets are supposed to be backed up as part of - the standard backup procedures for the Kubernetes cluster. +--- -## Backup from a standby +!!! Important + On-demand backups do **not** include Kubernetes secrets for the PostgreSQL + superuser or application user. You should ensure these secrets are included in + your broader Kubernetes cluster backup strategy. - -Taking a base backup requires to scrape the whole data content of the -PostgreSQL instance on disk, possibly resulting in I/O contention with the -actual workload of the database. +## Backup Methods -For this reason, CloudNativePG allows you to take advantage of a -feature which is directly available in PostgreSQL: **backup from a standby**. +CloudNativePG currently supports the following backup methods for scheduled +and on-demand backups: -By default, backups will run on the most aligned replica of a `Cluster`. If -no replicas are available, backups will run on the primary instance. +- `plugin` – Uses a CNPG-I plugin (requires `.spec.pluginConfiguration`) +- `volumeSnapshot` – Uses native [Kubernetes volume snapshots](appendixes/backup_volumesnapshot.md#how-to-configure-volume-snapshot-backups) +- `barmanObjectStore` – Uses [Barman Cloud for object storage](appendixes/backup_barmanobjectstore.md) + *(deprecated starting with v1.26 in favor of the + [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/), + but still the default for backward compatibility)* -!!! Info +Specify the method using the `.spec.method` field (defaults to +`barmanObjectStore`). + +If your cluster is configured to support volume snapshots, you can enable +scheduled snapshot backups like this: + +```yaml +spec: + method: volumeSnapshot +``` + +To use the Barman Cloud Plugin as the backup method, set `method: plugin` and +configure the plugin accordingly. You can find an example in the +["Performing a Base Backup" section of the plugin documentation](https://cloudnative-pg.io/plugin-barman-cloud/docs/usage/#performing-a-base-backup) + +## Backup from a Standby + +Taking a base backup involves reading the entire on-disk data set of a +PostgreSQL instance, which can introduce I/O contention and impact the +performance of the active workload. + +To reduce this impact, **CloudNativePG supports taking backups from a standby +instance**, leveraging PostgreSQL’s built-in capability to perform backups from +read-only replicas. + +By default, backups are performed on the **most up-to-date replica** in the +cluster. If no replicas are available, the backup will fall back to the +**primary instance**. + +!!! Note + The examples in this section are focused on backup target selection and do not + take the backup method (`spec.method`) into account, as it is not relevant to + the scope being discussed. + +### How It Works + +When `prefer-standby` is the target (the default behavior), CloudNativePG will +attempt to: + +1. 
Identify the most synchronized standby node. +2. Run the backup process on that standby. +3. Fall back to the primary if no standbys are available. + +This strategy minimizes interference with the primary’s workload. + +!!! Warning Although the standby might not always be up to date with the primary, in the time continuum from the first available backup to the last archived WAL this is normally irrelevant. The base backup indeed @@ -328,8 +423,10 @@ no replicas are available, backups will run on the primary instance. primary. This might produce unexpected results in the short term (before `archive_timeout` kicks in) in deployments with low write activity. -If you prefer to always run backups on the primary, you can set the backup -target to `primary` as outlined in the example below: +### Forcing Backup on the Primary + +To always run backups on the primary instance, explicitly set the backup target +to `primary` in the cluster configuration: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -342,21 +439,16 @@ spec: ``` !!! Warning - Beware of setting the target to primary when performing a cold backup - with volume snapshots, as this will shut down the primary for - the time needed to take the snapshot, impacting write operations. - This also applies to taking a cold backup in a single-instance cluster, even - if you did not explicitly set the primary as the target. - -When the backup target is set to `prefer-standby`, such policy will ensure -backups are run on the most up-to-date available secondary instance, or if no -other instance is available, on the primary instance. + Be cautious when using `primary` as the target for **cold backups using + volume snapshots**, as this will require shutting down the primary instance + temporarily—interrupting all write operations. The same caution applies to + single-instance clusters, even if you haven't explicitly set the target. -By default, when not otherwise specified, target is automatically set to take -backups from a standby. +### Overriding the Cluster-Wide Target -The backup target specified in the `Cluster` can be overridden in the `Backup` -and `ScheduledBackup` types, like in the following example: +You can override the cluster-level target on a per-backup basis, using either +`Backup` or `ScheduledBackup` resources. Here's an example of an on-demand +backup: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -369,6 +461,24 @@ spec: target: "primary" ``` -In the previous example, CloudNativePG will invariably choose the primary -instance even if the `Cluster` is set to prefer replicas. +In this example, even if the cluster’s default target is `prefer-standby`, the +backup will be taken from the primary instance. +## Retention Policies + +CloudNativePG is evolving toward a **backup-agnostic architecture**, where +backup responsibilities are delegated to external **CNPG-I plugins**. These +plugins are expected to offer advanced and customizable data protection +features, including sophisticated retention management, that go beyond the +built-in capabilities and scope of CloudNativePG. + +As part of this transition, the `spec.backup.retentionPolicy` field in the +`Cluster` resource is **deprecated** and will be removed in a future release. + +For more details on available retention features, refer to your chosen plugin’s documentation. +For example: ["Retention Policies" with Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/docs/retention/). + +!!! 
Important + Users are encouraged to rely on the retention mechanisms provided by the + backup plugin they are using. This ensures better flexibility and consistency + with the backup method in use. diff --git a/docs/src/backup_recovery.md b/docs/src/backup_recovery.md index 942ae45f11..742ceb403b 100644 --- a/docs/src/backup_recovery.md +++ b/docs/src/backup_recovery.md @@ -1,3 +1,4 @@ # Backup and Recovery + [Backup](backup.md) and [recovery](recovery.md) are in two separate sections. diff --git a/docs/src/before_you_start.md b/docs/src/before_you_start.md index 2d6c0377fe..4d6935631a 100644 --- a/docs/src/before_you_start.md +++ b/docs/src/before_you_start.md @@ -1,4 +1,5 @@ # Before You Start + Before we get started, it is essential to go over some terminology that is specific to Kubernetes and PostgreSQL. @@ -131,6 +132,13 @@ PVC group belonging to the same PostgreSQL instance, namely the main volume containing the PGDATA (`storage`) and the volume for WALs (`walStorage`). +RTO +: Acronym for "recovery time objective", the amount of time a system can be + unavailable without adversely impacting the application. + +RPO +: Acronym for "recovery point objective", a calculation of the level of + acceptable data loss following a disaster recovery scenario. ## Cloud terminology diff --git a/docs/src/benchmarking.md b/docs/src/benchmarking.md index 57cb7588fe..291d84689b 100644 --- a/docs/src/benchmarking.md +++ b/docs/src/benchmarking.md @@ -1,4 +1,5 @@ # Benchmarking + The CNPG kubectl plugin provides an easy way for benchmarking a PostgreSQL deployment in Kubernetes using CloudNativePG. @@ -78,6 +79,17 @@ kubectl cnpg pgbench \ -- --time 30 --client 1 --jobs 1 ``` +By default, jobs do not expire. You can enable automatic deletion with the +`--ttl` flag. The job will be deleted after the specified duration (in seconds). + +```shell +kubectl cnpg pgbench \ + --job-name pgbench-run \ + --ttl 600 \ + cluster-example \ + -- --time 30 --client 1 --jobs 1 +``` + If you want to run a `pgbench` job on a specific worker node, you can use the `--node-selector` option. Suppose you want to run the previous initialization job on a node having the `workload=pgbench` label, you can run: @@ -159,7 +171,7 @@ It will: 1. Create a fio deployment composed by a single Pod, which will run fio on the PVC, create graphs after completing the benchmark and start serving the generated files with a webserver. We use the - [`fio-tools`](https://github.com/wallnerryan/fio-tools`) image for that. + [`fio-tools`](https://github.com/wallnerryan/fio-tools) image for that. The Pod created by the deployment will be ready when it starts serving the results. You can forward the port of the pod created by the deployment diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 53f78de012..0dd68f2f41 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -1,6 +1,7 @@ # Bootstrap + -This section describes the options you have to create a new +This section describes the options available to create a new PostgreSQL cluster and the design rationale behind them. There are primarily two ways to bootstrap a new cluster: @@ -8,23 +9,24 @@ There are primarily two ways to bootstrap a new cluster: - from an existing PostgreSQL cluster, either directly (`pg_basebackup`) or indirectly through a physical base backup (`recovery`) -The `initdb` bootstrap also offers the possibility to import one or more -databases from an existing Postgres cluster, even outside Kubernetes, and -having a different major version of Postgres. 
+The `initdb` bootstrap also provides the option to import one or more +databases from an existing PostgreSQL cluster, even if it's outside +Kubernetes or running a different major version of PostgreSQL. For more detailed information about this feature, please refer to the ["Importing Postgres databases"](database_import.md) section. !!! Important - Bootstrapping from an existing cluster opens up the possibility - to create a **replica cluster**, that is an independent PostgreSQL - cluster which is in continuous recovery, synchronized with the source - and that accepts read-only connections. + Bootstrapping from an existing cluster enables the creation of a + **replica cluster**—an independent PostgreSQL cluster that remains in + continuous recovery, stays synchronized with the source cluster, and + accepts read-only connections. + For more details, refer to the [Replica Cluster section](replica_cluster.md). !!! Warning CloudNativePG requires both the `postgres` user and database to - always exists. Using the local Unix Domain Socket, it needs to connect - as `postgres` user to the `postgres` database via `peer` authentication in - order to perform administrative tasks on the cluster. + always exist. Using the local Unix Domain Socket, it needs to connect + as the `postgres` user to the `postgres` database via `peer` authentication in + order to perform administrative tasks on the cluster. **DO NOT DELETE** the `postgres` user or the `postgres` database!!! !!! Info @@ -45,18 +47,25 @@ specification. CloudNativePG currently supports the following bootstrap methods: existing cluster and, if needed, replaying all the available WAL files or up to a given *point in time* - `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of - the same major version using `pg_basebackup` via streaming replication protocol - - useful if you want to migrate databases to CloudNativePG, even - from outside Kubernetes. + the same major version using `pg_basebackup` through the streaming + replication protocol. This method is particularly useful for migrating + databases to CloudNativePG, although meeting all requirements can be + challenging. Be sure to review the warnings in the + [`pg_basebackup` subsection](#bootstrap-from-a-live-cluster-pg_basebackup) + carefully. -Differently from the `initdb` method, both `recovery` and `pg_basebackup` +Only one bootstrap method can be specified in the manifest. +Attempting to define multiple bootstrap methods will result in validation errors. + +In contrast to the `initdb` method, both `recovery` and `pg_basebackup` create a new cluster based on another one (either offline or online) and can be used to spin up replica clusters. They both rely on the definition of external clusters. +Refer to the [replica cluster section](replica_cluster.md) for more information. -Given that there are several possible backup methods and combinations of backup -storage that the CloudNativePG operator provides, please refer to the -["Recovery" section](recovery.md) for guidance on each method. +Given the amount of possible backup methods and combinations of backup +storage that the CloudNativePG operator provides for `recovery`, please refer to +the dedicated ["Recovery" section](recovery.md) for guidance on each method. !!! 
Seealso "API reference" Please refer to the ["API reference for the `bootstrap` section](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BootstrapConfiguration) @@ -64,9 +73,9 @@ storage that the CloudNativePG operator provides, please refer to the ## The `externalClusters` section -The `externalClusters` section provides a mechanism for specifying one or more -PostgreSQL clusters associated with the current configuration. Its primary use -cases include: +The `externalClusters` section of the cluster manifest can be used to configure +access to one or more PostgreSQL clusters as *sources*. +The primary use cases include: 1. **Importing Databases:** Specify an external source to be utilized during the [importation of databases](database_import.md) via logical backup and @@ -87,7 +96,7 @@ As far as bootstrapping is concerned, `externalClusters` can be used to define the source PostgreSQL cluster for either the `pg_basebackup` method or the `recovery` one. An external cluster needs to have: -- a name that identifies the origin cluster, to be used as a reference via the +- a name that identifies the external cluster, to be used as a reference via the `source` option - at least one of the following: @@ -98,13 +107,20 @@ method or the `recovery` one. An external cluster needs to have: - the catalog of physical base backups for the Postgres cluster !!! Note - A recovery object store is normally an AWS S3, or an Azure Blob Storage, - or a Google Cloud Storage source that is managed by Barman Cloud. + A recovery object store is normally an AWS S3, Azure Blob Storage, + or Google Cloud Storage source that is managed by Barman Cloud. When only the streaming connection is defined, the source can be used for the `pg_basebackup` method. When only the recovery object store is defined, the -source can be used for the `recovery` method. When both are defined, any of the -two bootstrap methods can be chosen. +source can be used for the `recovery` method. When both are defined, any of +the two bootstrap methods can be chosen. The following table summarizes your +options: + +| Content of externalClusters | pg_basebackup | recovery | +|:----------------------------|:-------------:|:--------:| +| Only streaming | ✓ | | +| Only object store | | ✓ | +| Streaming and object store | ✓ | ✓ | Furthermore, in case of `pg_basebackup` or full `recovery` point in time, the cluster is eligible for replica cluster mode. This means that the cluster is @@ -121,7 +137,7 @@ Whenever a password is supplied within an `externalClusters` entry, CloudNativePG autonomously manages a [PostgreSQL password file](https://www.postgresql.org/docs/current/libpq-pgpass.html) for it, residing at `/controller/external/NAME/pgpass` in each instance. -This approach empowers CloudNativePG to securely establish connections with an +This approach enables CloudNativePG to securely establish connections with an external server without exposing any passwords in the connection string. Instead, the connection safely references the aforementioned file through the `passfile` connection parameter. @@ -204,36 +220,87 @@ The user that owns the database defaults to the database name instead. The application user is not used internally by the operator, which instead relies on the superuser to reconcile the cluster with the desired status. -### Passing options to `initdb` +### Passing Options to `initdb` -The actual PostgreSQL data directory is created via an invocation of the -`initdb` PostgreSQL command. 
If you need to add custom options to that command
-(i.e., to change the `locale` used for the template databases or to add data
-checksums), you can use the following parameters:
+The PostgreSQL data directory is initialized using the
+[`initdb` PostgreSQL command](https://www.postgresql.org/docs/current/app-initdb.html).
+
+CloudNativePG enables you to customize the behavior of `initdb` to modify
+settings such as default locale configurations and data checksums.
+
+!!! Warning
+    CloudNativePG acts only as a direct proxy to `initdb` for locale-related
+    options, due to the ongoing and significant enhancements in PostgreSQL's locale
+    support. It is your responsibility to ensure that the correct options are
+    provided, following the PostgreSQL documentation, and to verify that the
+    bootstrap process completes successfully.
+
+To include custom options in the `initdb` command, you can use the following
+parameters:
+
+builtinLocale
+: When `builtinLocale` is set to a value, CloudNativePG passes it to the
+  `--builtin-locale` option in `initdb`. This option controls the builtin locale, as
+  defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html)
+  from the PostgreSQL documentation (default: empty). Note that this option requires
+  `localeProvider` to be set to `builtin`. Available from PostgreSQL 17.
 
 dataChecksums
-: When `dataChecksums` is set to `true`, CNPG invokes the `-k` option in
+: When `dataChecksums` is set to `true`, CloudNativePG invokes the `-k` option in
   `initdb` to enable checksums on data pages and help detect corruption by the
   I/O system - that would otherwise be silent (default: `false`).
 
 encoding
-: When `encoding` set to a value, CNPG passes it to the `--encoding` option in `initdb`,
-  which selects the encoding of the template database (default: `UTF8`).
+: When `encoding` is set to a value, CloudNativePG passes it to the `--encoding`
+  option in `initdb`, which selects the encoding of the template database
+  (default: `UTF8`).
+
+icuLocale
+: When `icuLocale` is set to a value, CloudNativePG passes it to the
+  `--icu-locale` option in `initdb`. This option controls the ICU locale, as
+  defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html)
+  from the PostgreSQL documentation (default: empty).
+  Note that this option requires `localeProvider` to be set to `icu`.
+  Available from PostgreSQL 15.
+
+icuRules
+: When `icuRules` is set to a value, CloudNativePG passes it to the
+  `--icu-rules` option in `initdb`. This option specifies additional collation
+  rules to customize the behavior of the ICU locale, as defined in ["Locale
+  Support"](https://www.postgresql.org/docs/current/locale.html) from the
+  PostgreSQL documentation (default: empty). Note that this option requires
+  `localeProvider` to be set to `icu`. Available from PostgreSQL 16.
+
+locale
+: When `locale` is set to a value, CloudNativePG passes it to the `--locale`
+  option in `initdb`. This option controls the locale, as defined in
+  ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from
+  the PostgreSQL documentation. By default, the locale parameter is empty. In
+  this case, environment variables such as `LANG` are used to determine the
+  locale. Be aware that these variables can vary between container images,
+  potentially leading to inconsistent behavior.
 
 localeCollate
-: When `localeCollate` is set to a value, CNPG passes it to the `--lc-collate`
+: When `localeCollate` is set to a value, CloudNativePG passes it to the `--lc-collate`
  option in `initdb`.
This option controls the collation order (`LC_COLLATE` subcategory), as defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from the PostgreSQL documentation (default: `C`). localeCType -: When `localeCType` is set to a value, CNPG passes it to the `--lc-ctype` option in +: When `localeCType` is set to a value, CloudNativePG passes it to the `--lc-ctype` option in `initdb`. This option controls the collation order (`LC_CTYPE` subcategory), as defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from the PostgreSQL documentation (default: `C`). +localeProvider +: When `localeProvider` is set to a value, CloudNativePG passes it to the `--locale-provider` +option in `initdb`. This option controls the locale provider, as defined in +["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from the +PostgreSQL documentation (default: empty, which means `libc` for PostgreSQL). +Available from PostgreSQL 15. + walSegmentSize -: When `walSegmentSize` is set to a value, CNPG passes it to the `--wal-segsize` +: When `walSegmentSize` is set to a value, CloudNativePG passes it to the `--wal-segsize` option in `initdb` (default: not set - defined by PostgreSQL as 16 megabytes). !!! Note @@ -364,9 +431,9 @@ spec: ## Bootstrap from another cluster -CloudNativePG enables the bootstrap of a cluster starting from +CloudNativePG enables bootstrapping a cluster starting from another one of the same major version. -This operation can happen by connecting directly to the source cluster via +This operation can be carried out either connecting directly to the source cluster via streaming replication (`pg_basebackup`), or indirectly via an existing physical *base backup* (`recovery`). @@ -377,56 +444,73 @@ by `name` (our recommendation is to use the same `name` of the origin cluster). By default the `recovery` method strictly uses the `name` of the cluster in the `externalClusters` section to locate the main folder of the backup data within the object store, which is normally reserved - for the name of the server. You can specify a different one with the - `barmanObjectStore.serverName` property (by default assigned to the - value of `name` in the external cluster definition). + for the name of the server. Backup plugins provide ways to specify a + different one. For example, the Barman Cloud Plugin provides the [`serverName` parameter](https://cloudnative-pg.io/plugin-barman-cloud/docs/parameters/) + (by default assigned to the value of `name` in the external cluster definition). ### Bootstrap from a backup (`recovery`) -Given the several possibilities, methods, and combinations that the -CloudNativePG operator provides in terms of backup and recovery, please refer -to the ["Recovery" section](recovery.md). +Given the variety of backup methods and combinations of backup storage +options provided by the CloudNativePG operator for `recovery`, please refer +to the dedicated ["Recovery" section](recovery.md) for detailed guidance on +each method. ### Bootstrap from a live cluster (`pg_basebackup`) -The `pg_basebackup` bootstrap mode lets you create a new cluster (*target*) as -an exact physical copy of an existing and **binary compatible** PostgreSQL -instance (*source*), through a valid *streaming replication* connection. -The source instance can be either a primary or a standby PostgreSQL server. 
+The `pg_basebackup` bootstrap mode allows you to create a new cluster +(*target*) as an exact physical copy of an existing and **binary-compatible** +PostgreSQL instance (*source*) managed by CloudNativePG, using a valid +*streaming replication* connection. The source instance can either be a primary +or a standby PostgreSQL server. It’s crucial to thoroughly review the +requirements section below, as the pros and cons of PostgreSQL physical +replication fully apply. -The primary use case for this method is represented by **migrations** to CloudNativePG, -either from outside Kubernetes or within Kubernetes (e.g., from another operator). +The primary use cases for this method include: -!!! Warning - The current implementation creates a *snapshot* of the origin PostgreSQL - instance when the cloning process terminates and immediately starts - the created cluster. See ["Current limitations"](#current-limitations) below for details. +- Reporting and business intelligence clusters that need to be regenerated + periodically (daily, weekly) +- Test databases containing live data that require periodic regeneration + (daily, weekly, monthly) and anonymization +- Rapid spin-up of a standalone replica cluster +- Physical migrations of CloudNativePG clusters to different namespaces or + Kubernetes clusters -Similar to the case of the `recovery` bootstrap method, once the clone operation -completes, the operator will take ownership of the target cluster, starting from -the first instance. This includes overriding some configuration parameters, as -required by CloudNativePG, resetting the superuser password, creating -the `streaming_replica` user, managing the replicas, and so on. The resulting -cluster will be completely independent of the source instance. +!!! Important + Avoid using this method, based on physical replication, to migrate an + existing PostgreSQL cluster outside of Kubernetes into CloudNativePG, unless you + are completely certain that all [requirements](#requirements) are met and + the operation has been + thoroughly tested. The CloudNativePG community does not endorse this approach + for such use cases, and recommends using logical import instead. It is + exceedingly rare that all requirements for physical replication are met in a + way that seamlessly works with CloudNativePG. + +!!! Warning + In its current implementation, this method clones the source PostgreSQL + instance, thereby creating a *snapshot*. Once the cloning process has finished, + the new cluster is immediately started. + Refer to ["Current limitations"](#current-limitations) for more details. + +Similar to the `recovery` bootstrap method, once the cloning operation is +complete, the operator takes full ownership of the target cluster, starting +from the first instance. This includes overriding certain configuration +parameters as required by CloudNativePG, resetting the superuser password, +creating the `streaming_replica` user, managing replicas, and more. The +resulting cluster operates independently from the source instance. !!! Important - Configuring the network between the target instance and the source instance - goes beyond the scope of CloudNativePG documentation, as it depends - on the actual context and environment. + Configuring the network connection between the target and source instances + lies outside the scope of CloudNativePG documentation, as it depends heavily on + the specific context and environment. 
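+
+As a minimal sketch only (the service name is hypothetical, and the selector
+labels must match the ones the operator applies to instance pods in your
+version), a source primary running in another Kubernetes cluster could be
+exposed through a `LoadBalancer` service so that `pg_basebackup` on the
+target can reach it:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: source-db-expose
+spec:
+  type: LoadBalancer
+  ports:
+    - port: 5432
+      targetPort: 5432
+  selector:
+    # assumed labels; verify against the labels on your instance pods
+    cnpg.io/cluster: source-db
+    cnpg.io/instanceRole: primary
+```
+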
-The streaming replication client on the target instance, which will be -transparently managed by `pg_basebackup`, can authenticate itself on the source -instance in any of the following ways: +The streaming replication client on the target instance, managed transparently +by `pg_basebackup`, can authenticate on the source instance using one of the +following methods: -1. via [username/password](#usernamepassword-authentication) -2. via [TLS client certificate](#tls-certificate-authentication) +1. [Username/password](#usernamepassword-authentication) +2. [TLS client certificate](#tls-certificate-authentication) -The latter is the recommended one if you connect to a source managed -by CloudNativePG or configured for TLS authentication. -The first option is, however, the most common form of authentication to a -PostgreSQL server in general, and might be the easiest way if the source -instance is on a traditional environment outside Kubernetes. -Both cases are explained below. +Both authentication methods are detailed below. #### Requirements @@ -504,7 +588,7 @@ file on the source PostgreSQL instance: host replication streaming_replica all md5 ``` -The following manifest creates a new PostgreSQL 16.4 cluster, +The following manifest creates a new PostgreSQL 17.5 cluster, called `target-db`, using the `pg_basebackup` bootstrap method to clone an external PostgreSQL cluster defined as `source-db` (in the `externalClusters` array). As you can see, the `source-db` @@ -519,7 +603,7 @@ metadata: name: target-db spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:16.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.5 bootstrap: pg_basebackup: @@ -539,7 +623,7 @@ spec: ``` All the requirements must be met for the clone operation to work, including -the same PostgreSQL version (in our case 16.4). +the same PostgreSQL version (in our case 17.5). #### TLS certificate authentication @@ -554,7 +638,7 @@ in the same Kubernetes cluster. This example can be easily adapted to cover an instance that resides outside the Kubernetes cluster. -The manifest defines a new PostgreSQL 16.4 cluster called `cluster-clone-tls`, +The manifest defines a new PostgreSQL 17.5 cluster called `cluster-clone-tls`, which is bootstrapped using the `pg_basebackup` method from the `cluster-example` external cluster. The host is identified by the read/write service in the same cluster, while the `streaming_replica` user is authenticated @@ -569,7 +653,7 @@ metadata: name: cluster-clone-tls spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:16.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.5 bootstrap: pg_basebackup: @@ -650,7 +734,10 @@ instance using a second connection (see the `--wal-method=stream` option for Once the backup is completed, the new instance will be started on a new timeline and diverge from the source. For this reason, it is advised to stop all write operations to the source database -before migrating to the target database in Kubernetes. +before migrating to the target database. + +Note that this limitation applies only if the target cluster is not defined as +a replica cluster. !!! Important Before you attempt a migration, you must test both the procedure diff --git a/docs/src/certificates.md b/docs/src/certificates.md index b5e2d1f49f..28585e8191 100644 --- a/docs/src/certificates.md +++ b/docs/src/certificates.md @@ -1,4 +1,5 @@ # Certificates + CloudNativePG was designed to natively support TLS certificates. 
To set up a cluster, the operator requires: @@ -50,6 +51,11 @@ expiration (within a 90-day validity period). certificates not controlled by CloudNativePG must be re-issued following the renewal process. +When generating certificates, the operator assumes that the Kubernetes +cluster's DNS zone is set to `cluster.local` by default. This behavior can be +customized by setting the `KUBERNETES_CLUSTER_DOMAIN` environment variable. A +convenient alternative is to use the [operator's configuration capability](operator_conf.md). + ### Server certificates #### Server CA secret @@ -129,14 +135,14 @@ Given the following files: Create a secret containing the CA certificate: -``` +``` sh kubectl create secret generic my-postgresql-server-ca \ --from-file=ca.crt=./server-ca.crt ``` Create a secret with the TLS certificate: -``` +``` sh kubectl create secret tls my-postgresql-server \ --cert=./server.crt --key=./server.key ``` @@ -257,6 +263,36 @@ the following parameters: instances, you can add a label with the key `cnpg.io/reload` to it. Otherwise, you must reload the instances using the `kubectl cnpg reload` subcommand. +#### Customizing the `streaming_replica` client certificate + +In some environments, it may not be possible to generate a certificate with the +common name `streaming_replica` due to company policies or other security +concerns, such as a CA shared across multiple clusters. In such cases, the user +mapping feature can be used to allow authentication as the `streaming_replica` +user with certificates containing different common names. + +To configure this setup, add a `pg_ident.conf` entry for the predefined map +named `cnpg_streaming_replica`. + +For example, to enable `streaming_replica` authentication using a certificate +with the common name `streaming-replica.cnpg.svc.cluster.local`, add the +following to your cluster definition: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + postgresql: + pg_ident: + - cnpg_streaming_replica streaming-replica.cnpg.svc.cluster.local streaming_replica +``` + +For further details on how `pg_ident.conf` is managed by the operator, see the +["PostgreSQL Configuration" page](postgresql_conf.md#the-pg_ident-section) in +the documentation. + #### Cert-manager example This simple example shows how to use [cert-manager](https://cert-manager.io/) diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 72aa2d1b8f..ba0d3211ea 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -1,4 +1,5 @@ # API Reference +

Package v1 contains API Schema definitions for the postgresql v1 API group

@@ -9,15 +10,19 @@ - [Backup](#postgresql-cnpg-io-v1-Backup) - [Cluster](#postgresql-cnpg-io-v1-Cluster) - [ClusterImageCatalog](#postgresql-cnpg-io-v1-ClusterImageCatalog) +- [Database](#postgresql-cnpg-io-v1-Database) +- [FailoverQuorum](#postgresql-cnpg-io-v1-FailoverQuorum) - [ImageCatalog](#postgresql-cnpg-io-v1-ImageCatalog) - [Pooler](#postgresql-cnpg-io-v1-Pooler) +- [Publication](#postgresql-cnpg-io-v1-Publication) - [ScheduledBackup](#postgresql-cnpg-io-v1-ScheduledBackup) +- [Subscription](#postgresql-cnpg-io-v1-Subscription) ## Backup {#postgresql-cnpg-io-v1-Backup} -

Backup is the Schema for the backups API

+

A Backup resource is a request for a PostgreSQL backup by the user.
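For instance, a minimal on-demand backup request might look like the following sketch, where the resource and cluster names are illustrative and `cluster` is the reference to the Cluster to back up documented later in this reference:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-example
spec:
  cluster:
    name: cluster-example
```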

@@ -118,6 +123,78 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
+## Database {#postgresql-cnpg-io-v1-Database} + + + +

Database is the Schema for the databases API

+ + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.cnpg.io/v1
kind [Required]
string
Database
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+DatabaseSpec +
+

Specification of the desired Database. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
status
+DatabaseStatus +
+

Most recently observed status of the Database. This data may not be up to +date. Populated by the system. Read-only. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
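As a sketch of how the `Database` resource is used (all names are illustrative; the individual fields are documented under `DatabaseSpec` below):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  name: db-app
spec:
  cluster:
    name: cluster-example
  name: app
  owner: app
  extensions:
    - name: pg_stat_statements
      ensure: present
```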
+ +## FailoverQuorum {#postgresql-cnpg-io-v1-FailoverQuorum} + + +**Appears in:** + + + +

FailoverQuorum contains the information about the current failover +quorum status of a PG cluster. It is updated by the instance manager +of the primary node and reset to zero by the operator to trigger +an update.

+ + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.cnpg.io/v1
kind [Required]
string
FailoverQuorum
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
status
+FailoverQuorumStatus +
+

Most recently observed status of the failover quorum.

+
+ ## ImageCatalog {#postgresql-cnpg-io-v1-ImageCatalog} @@ -185,6 +262,39 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- +## Publication {#postgresql-cnpg-io-v1-Publication} + + + +

Publication is the Schema for the publications API

+ + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.cnpg.io/v1
kind [Required]
string
Publication
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+PublicationSpec +
+ No description provided.
status [Required]
+PublicationStatus +
+ No description provided.
+ ## ScheduledBackup {#postgresql-cnpg-io-v1-ScheduledBackup} @@ -223,6 +333,39 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- +## Subscription {#postgresql-cnpg-io-v1-Subscription} + + + +

Subscription is the Schema for the subscriptions API

+ + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.cnpg.io/v1
kind [Required]
string
Subscription
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+SubscriptionSpec +
+ No description provided.
status [Required]
+SubscriptionStatus +
+ No description provided.
+ ## AffinityConfiguration {#postgresql-cnpg-io-v1-AffinityConfiguration} @@ -366,7 +509,7 @@ documentation

barmanObjectStore
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration +github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration

The configuration for the barman-cloud tool suite

@@ -492,7 +635,7 @@ plugin for this backup

Type is the role of the snapshot in the cluster, such as PG_DATA, PG_WAL and PG_TABLESPACE

-tablespaceName [Required]
+tablespaceName
string @@ -543,13 +686,13 @@ information that could be needed to correctly restore it.

FieldDescription LocalObjectReference
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference (Members of LocalObjectReference are embedded into this type.) No description provided. endpointCA
-github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector

EndpointCA store the CA bundle of the barman endpoint. @@ -575,7 +718,7 @@ errors with certificate issuer and barman-cloud-wal-archive.

FieldDescription cluster [Required]
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The cluster to backup

@@ -643,14 +786,14 @@ Overrides the default settings specified in the cluster '.backup.volumeSnapshot. FieldDescription BarmanCredentials
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanCredentials +github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanCredentials (Members of BarmanCredentials are embedded into this type.)

The potential credentials for each cloud provider

endpointCA
-github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector

EndpointCA store the CA bundle of the barman endpoint. @@ -809,13 +952,20 @@ parameter is omitted

The backup method being used

-online [Required]
+online
bool

Whether the backup was online/hot (true) or offline/cold (false)

+pluginMetadata
+map[string]string + + +

A map containing the plugin metadata

+ + @@ -912,7 +1062,7 @@ by applications. Defaults to the value of the database key.

secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

Name of the secret containing the initial credentials for the @@ -959,6 +1109,48 @@ enabling checksums on data pages (default: false)

The value to be passed as option --lc-ctype for initdb (default: C)

+locale
+string + + +

Sets the default collation order and character classification in the new database.

+ + +localeProvider
+string + + +

This option sets the locale provider for databases created in the new cluster. +Available from PostgreSQL 16.

+ + +icuLocale
+string + + +

Specifies the ICU locale when the ICU provider is used. +This option requires localeProvider to be set to icu. +Available from PostgreSQL 15.

+ + +icuRules
+string + + +

Specifies additional collation rules to customize the behavior of the default collation. +This option requires localeProvider to be set to icu. +Available from PostgreSQL 16.

+ + +builtinLocale
+string + + +

Specifies the locale name when the builtin provider is used. +This option requires localeProvider to be set to builtin. +Available from PostgreSQL 17.

+ + walSegmentSize
int @@ -1082,7 +1274,7 @@ by applications. Defaults to the value of the database key.

secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

Name of the secret containing the initial credentials for the @@ -1178,7 +1370,7 @@ by applications. Defaults to the value of the database key.

secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

Name of the secret containing the initial credentials for the @@ -1490,7 +1682,7 @@ Undefined or 0 disable synchronous replication.

superuserSecret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The secret containing the superuser password. If not defined a new @@ -1517,7 +1709,7 @@ user by setting it to NULL. Disabled by default.

imagePullSecrets
-[]github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +[]github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The list of pull secrets to be used to pull the images

@@ -1631,7 +1823,7 @@ https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information.

-ephemeralVolumesSizeLimit [Required]
+ephemeralVolumesSizeLimit
EphemeralVolumesSizeLimitConfiguration @@ -1762,14 +1954,22 @@ advisable for any PostgreSQL cluster employed for development/staging purposes.

-plugins [Required]
-PluginConfigurationList +plugins
+[]PluginConfiguration

The plugins configuration, containing any plugin to be loaded with the corresponding configuration

+probes
+ProbesConfiguration + + +

The configuration of the probes to be injected +in the PostgreSQL Pods.

+ + @@ -1865,7 +2065,7 @@ any plugin to be loaded with the corresponding configuration

during a switchover or a failover

-lastPromotionToken [Required]
+lastPromotionToken
string @@ -1983,36 +2183,41 @@ configmap data

The first recoverability point, stored as a date in RFC3339 format. -This field is calculated from the content of FirstRecoverabilityPointByMethod

+This field is calculated from the content of FirstRecoverabilityPointByMethod.

+

Deprecated: the field is not set for backup plugins.

firstRecoverabilityPointByMethod
map[BackupMethod]meta/v1.Time -

The first recoverability point, stored as a date in RFC3339 format, per backup method type

+

The first recoverability point, stored as a date in RFC3339 format, per backup method type.

+

Deprecated: the field is not set for backup plugins.

lastSuccessfulBackup
string -

Last successful backup, stored as a date in RFC3339 format -This field is calculated from the content of LastSuccessfulBackupByMethod

+

Last successful backup, stored as a date in RFC3339 format. +This field is calculated from the content of LastSuccessfulBackupByMethod.

+

Deprecated: the field is not set for backup plugins.

lastSuccessfulBackupByMethod
map[BackupMethod]meta/v1.Time -

Last successful backup, stored as a date in RFC3339 format, per backup method type

+

Last successful backup, stored as a date in RFC3339 format, per backup method type.

+

Deprecated: the field is not set for backup plugins.

lastFailedBackup
string -

Stored as a date in RFC3339 format

+

Last failed backup, stored as a date in RFC3339 format.

+

Deprecated: the field is not set for backup plugins.

cloudNativePGCommitHash
@@ -2086,21 +2291,21 @@ This field is reported when .spec.failoverDelay is populated or dur

OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster

-azurePVCUpdateEnabled
-bool +image
+string -

AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster

+

Image contains the image name used by the pods

-image
-string +pgDataImageInfo
+ImageInfo -

Image contains the image name used by the pods

+

PGDataImageInfo contains the details of the latest image that has run on the current data directory.

-pluginStatus [Required]
+pluginStatus
[]PluginStatus @@ -2124,6 +2329,13 @@ TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO WAL file, and Time of latest checkpoint

+systemID
+string + + +

SystemID is the latest detected PostgreSQL SystemID

+ + @@ -2153,6 +2365,21 @@ Map keys are the config map names, map values are the versions

+## DataDurabilityLevel {#postgresql-cnpg-io-v1-DataDurabilityLevel} + +(Alias of `string`) + +**Appears in:** + +- [SynchronousReplicaConfiguration](#postgresql-cnpg-io-v1-SynchronousReplicaConfiguration) + + +

DataDurabilityLevel specifies how strictly to enforce synchronous replication +when cluster instances are unavailable. Options are required or preferred.

+ + + + ## DataSource {#postgresql-cnpg-io-v1-DataSource} @@ -2192,37 +2419,79 @@ PostgreSQL cluster from an existing storage

-## Database {#postgresql-cnpg-io-v1-Database} +## DatabaseObjectSpec {#postgresql-cnpg-io-v1-DatabaseObjectSpec} +**Appears in:** -

Database is the Schema for the databases API

+- [ExtensionSpec](#postgresql-cnpg-io-v1-ExtensionSpec) + +- [FDWSpec](#postgresql-cnpg-io-v1-FDWSpec) + +- [SchemaSpec](#postgresql-cnpg-io-v1-SchemaSpec) + + +

DatabaseObjectSpec contains the fields which are common to every +database object

- +

Name of the extension/schema

+ - - +
FieldDescription
metadata [Required]
-meta/v1.ObjectMeta +
name [Required]
+string
- No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
-DatabaseSpec +
ensure
+EnsureOption
-

Specification of the desired Database. -More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+

Specifies whether an extension/schema should be present or absent in +the database. If set to present, the extension/schema will be +created if it does not exist. If set to absent, the +extension/schema will be removed if it exists.

status
-DatabaseStatus +
+ +## DatabaseObjectStatus {#postgresql-cnpg-io-v1-DatabaseObjectStatus} + + +**Appears in:** + +- [DatabaseStatus](#postgresql-cnpg-io-v1-DatabaseStatus) + + +

DatabaseObjectStatus is the status of the managed database objects

+ + + + + + + + + + + + @@ -2273,7 +2542,9 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- - [Database](#postgresql-cnpg-io-v1-Database) -

DatabaseSpec is the specification of a Postgresql Database

+

DatabaseSpec is the specification of a PostgreSQL Database, built around the +CREATE DATABASE, ALTER DATABASE, and DROP DATABASE SQL commands of +PostgreSQL.

FieldDescription
name [Required]
+string
-

Most recently observed status of the Database. This data may not be up to -date. Populated by the system. Read-only. -More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+

The name of the object

+
applied [Required]
+bool +
+

True if the object has been installed successfully in +the database

+
message
+string +
+

Message is the object reconciliation message

@@ -2283,68 +2554,187 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- core/v1.LocalObjectReference + + + - - - - - - - -
-

The corresponding cluster

+

The name of the PostgreSQL cluster hosting the database.

+
ensure
+EnsureOption +
+

Ensure the PostgreSQL database is present or absent - defaults to "present".

name [Required]
string
-

The name inside PostgreSQL

+

The name of the database to create inside PostgreSQL. This setting cannot be changed.

owner [Required]
string
-

The owner

+

Maps to the OWNER parameter of CREATE DATABASE. +Maps to the OWNER TO command of ALTER DATABASE. +The role name of the user who owns the database inside PostgreSQL.

encoding
+
template
string
-

The encoding (cannot be changed)

+

Maps to the TEMPLATE parameter of CREATE DATABASE. This setting +cannot be changed. The name of the template from which to create +this database.

isTemplate
-bool +
encoding
+string
-

True when the database is a template

+

Maps to the ENCODING parameter of CREATE DATABASE. This setting +cannot be changed. Character set encoding to use in the database.

allowConnections
-bool +
locale
+string
-

True when connections to this database are allowed

+

Maps to the LOCALE parameter of CREATE DATABASE. This setting +cannot be changed. Sets the default collation order and character +classification in the new database.

connectionLimit
-int +
localeProvider
+string
-

Connection limit, -1 means no limit and -2 means the -database is not valid

+

Maps to the LOCALE_PROVIDER parameter of CREATE DATABASE. This +setting cannot be changed. This option sets the locale provider for +databases created in the new cluster. Available from PostgreSQL 16.

tablespace
+
localeCollate
string
-

The default tablespace of this database

+

Maps to the LC_COLLATE parameter of CREATE DATABASE. This +setting cannot be changed.

databaseReclaimPolicy
-DatabaseReclaimPolicy +
localeCType
+string
-

The policy for end-of-life maintenance of this database

+

Maps to the LC_CTYPE parameter of CREATE DATABASE. This setting +cannot be changed.

+icuLocale
+string + + +

Maps to the ICU_LOCALE parameter of CREATE DATABASE. This +setting cannot be changed. Specifies the ICU locale when the ICU +provider is used. This option requires localeProvider to be set to +icu. Available from PostgreSQL 15.

+ + +icuRules
+string + + +

Maps to the ICU_RULES parameter of CREATE DATABASE. This setting +cannot be changed. Specifies additional collation rules to customize +the behavior of the default collation. This option requires +localeProvider to be set to icu. Available from PostgreSQL 16.

+ + +builtinLocale
+string + + +

Maps to the BUILTIN_LOCALE parameter of CREATE DATABASE. This +setting cannot be changed. Specifies the locale name when the +builtin provider is used. This option requires localeProvider to +be set to builtin. Available from PostgreSQL 17.

+ + +collationVersion
+string + + +

Maps to the COLLATION_VERSION parameter of CREATE DATABASE. This +setting cannot be changed.

+ + +isTemplate
+bool + + +

Maps to the IS_TEMPLATE parameter of CREATE DATABASE and ALTER DATABASE. If true, this database is considered a template and can +be cloned by any user with CREATEDB privileges.

+ + +allowConnections
+bool + + +

Maps to the ALLOW_CONNECTIONS parameter of CREATE DATABASE and +ALTER DATABASE. If false then no one can connect to this database.

+ + +connectionLimit
+int + + +

Maps to the CONNECTION LIMIT clause of CREATE DATABASE and +ALTER DATABASE. How many concurrent connections can be made to +this database. -1 (the default) means no limit.

+ + +tablespace
+string + + +

Maps to the TABLESPACE parameter of CREATE DATABASE. +Maps to the SET TABLESPACE command of ALTER DATABASE. +The name of the tablespace (in PostgreSQL) that will be associated +with the new database. This tablespace will be the default +tablespace used for objects created in this database.

+ + +databaseReclaimPolicy
+DatabaseReclaimPolicy + + +

The policy for end-of-life maintenance of this database.

+ + +schemas
+[]SchemaSpec + + +

The list of schemas to be managed in the database

+ + +extensions
+[]ExtensionSpec + + +

The list of extensions to be managed in the database

+ + +fdws
+[]FDWSpec + + +

The list of foreign data wrappers to be managed in the database

+ + + + ## DatabaseStatus {#postgresql-cnpg-io-v1-DatabaseStatus} @@ -2368,18 +2758,39 @@ database is not valid

desired state that was synchronized

-ready [Required]
+applied
bool -

Ready is true if the database was reconciled correctly

+

Applied is true if the database was reconciled correctly

-error [Required]
+message
string -

Error is the reconciliation error message

+

Message is the reconciliation output message

+ + +schemas
+[]DatabaseObjectStatus + + +

Schemas is the status of the managed schemas

+ + +extensions
+[]DatabaseObjectStatus + + +

Extensions is the status of the managed extensions

+ + +fdws
+[]DatabaseObjectStatus + + +

FDWs is the status of the managed FDWs

@@ -2420,6 +2831,12 @@ desired state that was synchronized

**Appears in:** +- [DatabaseObjectSpec](#postgresql-cnpg-io-v1-DatabaseObjectSpec) + +- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) + +- [OptionSpecValue](#postgresql-cnpg-io-v1-OptionSpecValue) + - [RoleConfiguration](#postgresql-cnpg-io-v1-RoleConfiguration) @@ -2444,14 +2861,14 @@ storage

- -
FieldDescription
shm [Required]
+
shm
k8s.io/apimachinery/pkg/api/resource.Quantity

Shm is the size limit of the shared memory volume

temporaryData [Required]
+
temporaryData
k8s.io/apimachinery/pkg/api/resource.Quantity
@@ -2461,6 +2878,105 @@ storage

+## ExtensionConfiguration {#postgresql-cnpg-io-v1-ExtensionConfiguration} + + +**Appears in:** + +- [PostgresConfiguration](#postgresql-cnpg-io-v1-PostgresConfiguration) + + +

ExtensionConfiguration is the configuration used to add +PostgreSQL extensions to the Cluster.

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

The name of the extension, required

+
image [Required]
+core/v1.ImageVolumeSource +
+

The image containing the extension, required

+
extension_control_path
+[]string +
+

The list of directories inside the image which should be added to extension_control_path. +If not defined, defaults to "/share".

+
dynamic_library_path
+[]string +
+

The list of directories inside the image which should be added to dynamic_library_path. +If not defined, defaults to "/lib".

+
ld_library_path
+[]string +
+

The list of directories inside the image which should be added to ld_library_path.

+
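For illustration only, an entry in `postgresql.extensions` could look like the sketch below; the extension name and image reference are hypothetical, and `image` follows the `core/v1.ImageVolumeSource` schema referenced above:

```yaml
postgresql:
  extensions:
    - name: vector
      image:
        reference: ghcr.io/example/pgvector-extension:latest
```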
+ +## ExtensionSpec {#postgresql-cnpg-io-v1-ExtensionSpec} + + +**Appears in:** + +- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) + + +

ExtensionSpec configures an extension in a database

+ + + + + + + + + + + + + + + +
FieldDescription
DatabaseObjectSpec
+DatabaseObjectSpec +
(Members of DatabaseObjectSpec are embedded into this type.) +

Common fields

+
version [Required]
+string +
+

The version of the extension to install. If empty, the operator will +install the default version (whatever is specified in the +extension's control file)

+
schema [Required]
+string +
+

The name of the schema in which to install the extension's objects, +in case the extension allows its contents to be relocated. If not +specified (default), and the extension's control file does not +specify a schema either, the current default object creation schema +is used.

+
+ ## ExternalCluster {#postgresql-cnpg-io-v1-ExternalCluster} @@ -2528,12 +3044,134 @@ secure and efficient password management for external clusters.

barmanObjectStore
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration +github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration

The configuration for the barman-cloud tool suite

+plugin [Required]
+PluginConfiguration + + +

The configuration of the plugin that is taking care +of WAL archiving and backups for this external cluster

+ + + + + +## FDWSpec {#postgresql-cnpg-io-v1-FDWSpec} + + +**Appears in:** + +- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) + + +

FDWSpec configures a Foreign Data Wrapper in a database

+ + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
DatabaseObjectSpec
+DatabaseObjectSpec +
(Members of DatabaseObjectSpec are embedded into this type.) +

Common fields

+
handler
+string +
+

Name of the handler function (e.g., "postgres_fdw_handler"). +This will be empty if no handler is specified. In that case, +the default handler is registered when the FDW extension is created.

+
validator
+string +
+

Name of the validator function (e.g., "postgres_fdw_validator"). +This will be empty if no validator is specified. In that case, +the default validator is registered when the FDW extension is created.

+
owner
+string +
+

Owner specifies the database role that will own the Foreign Data Wrapper. +The role must have superuser privileges in the target database.

+
options
+[]OptionSpec +
+

Options specifies the configuration options for the FDW +(key is the option name, value is the option value).

+
usage
+[]UsageSpec +
+

List of roles for which USAGE privileges on the FDW are granted or revoked.

+
+ +## FailoverQuorumStatus {#postgresql-cnpg-io-v1-FailoverQuorumStatus} + + +**Appears in:** + +- [FailoverQuorum](#postgresql-cnpg-io-v1-FailoverQuorum) + + +

FailoverQuorumStatus is the latest observed status of the failover +quorum of the PG cluster.

+ + + + + + + + + + + + + + + + +
FieldDescription
method
+string +
+

Contains the latest reported Method value.

+
standbyNames
+[]string +
+

StandbyNames is the list of potentially synchronous +instance names.

+
standbyNumber
+int +
+

StandbyNumber is the number of synchronous standbys that transactions +need to wait for replies from.

+
primary
+string +
+

Primary is the name of the primary instance that most recently updated +this object.

+
@@ -2593,6 +3231,37 @@ secure and efficient password management for external clusters.

+## ImageInfo {#postgresql-cnpg-io-v1-ImageInfo} + + +**Appears in:** + +- [ClusterStatus](#postgresql-cnpg-io-v1-ClusterStatus) + + +

ImageInfo contains the information about a PostgreSQL image

+ + + + + + + + + + + + +
FieldDescription
image [Required]
+string +
+

Image is the image name

+
majorVersion [Required]
+int +
+

MajorVersion is the major version of the image

+
+ ## Import {#postgresql-cnpg-io-v1-Import} @@ -2652,6 +3321,26 @@ database right after is imported - to be used with extreme care pg_restore are invoked, avoiding data import. Default: false.

+pgDumpExtraOptions
+[]string + + +

List of custom options to pass to the pg_dump command. IMPORTANT: +Use these options with caution and at your own risk, as the operator +does not validate their content. Be aware that certain options may +conflict with the operator's intended functionality or design.

+ + +pgRestoreExtraOptions
+[]string + + +

List of custom options to pass to the pg_restore command. IMPORTANT: +Use these options with caution and at your own risk, as the operator +does not validate their content. Be aware that certain options may +conflict with the operator's intended functionality or design.

+ + @@ -2738,6 +3427,52 @@ database right after is imported - to be used with extreme care
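As a hedged sketch of where these options fit (cluster names and option values are illustrative), both lists live under the `import` stanza of the `initdb` bootstrap:

```yaml
bootstrap:
  initdb:
    import:
      type: microservice
      databases:
        - app
      source:
        externalCluster: source-db
      pgDumpExtraOptions:
        - "--jobs=2"
      pgRestoreExtraOptions:
        - "--jobs=2"
```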

indicates on which TimelineId the instance is

+ip [Required]
+string + + +

IP address of the instance

+ + + + + +## IsolationCheckConfiguration {#postgresql-cnpg-io-v1-IsolationCheckConfiguration} + + +**Appears in:** + +- [LivenessProbe](#postgresql-cnpg-io-v1-LivenessProbe) + + +

IsolationCheckConfiguration contains the configuration for the isolation check +functionality in the liveness probe

+ + + + + + + + + + + + + +
FieldDescription
enabled
+bool +
+

Whether primary isolation checking is enabled for the liveness probe

+
requestTimeout
+int +
+

Timeout in milliseconds for requests during the primary isolation check

+
connectionTimeout
+int +
+

Timeout in milliseconds for connections during the primary isolation check

+
@@ -2899,6 +3634,41 @@ the bind+search LDAP authentication process

+## LivenessProbe {#postgresql-cnpg-io-v1-LivenessProbe} + + +**Appears in:** + +- [ProbesConfiguration](#postgresql-cnpg-io-v1-ProbesConfiguration) + + +

LivenessProbe is the configuration of the liveness probe

+ + + + + + + + + + + + +
FieldDescription
Probe
+Probe +
(Members of Probe are embedded into this type.) +

Probe is the standard probe configuration

+
isolationCheck
+IsolationCheckConfiguration +
+

Configures the feature that extends the liveness probe for a primary +instance. In addition to the basic checks, this verifies whether the +primary is isolated from the Kubernetes API server and from its +replicas, ensuring that it can be safely shut down if a network +partition or API unavailability is detected. Enabled by default.

+
+ ## ManagedConfiguration {#postgresql-cnpg-io-v1-ManagedConfiguration} @@ -2993,7 +3763,7 @@ It includes the type of service and its associated template specification.

Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.

-updateStrategy [Required]
+updateStrategy
ServiceUpdateStrategy @@ -3032,7 +3802,7 @@ Valid values are "rw", "r", and "ro", representing Valid values are "r", and "ro", representing read, and read-only services.

-additional [Required]
+additional
[]ManagedService @@ -3063,7 +3833,7 @@ not using the core data types.

-
FieldDescription
name [Required]
+
name
string
@@ -3118,14 +3888,14 @@ Default: false.

customQueriesConfigMap
-[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector +[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector

The list of config maps containing the custom queries

customQueriesSecret
-[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector

The list of secrets containing the custom queries

@@ -3245,6 +4015,71 @@ possible. false by default.

+## OptionSpec {#postgresql-cnpg-io-v1-OptionSpec} + + +**Appears in:** + +- [FDWSpec](#postgresql-cnpg-io-v1-FDWSpec) + + +

OptionSpec holds the name, value and the ensure field for an option

+ + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name of the option

+
OptionSpecValue
+OptionSpecValue +
(Members of OptionSpecValue are embedded into this type.) +

Value and ensure field of the option

+
+ +## OptionSpecValue {#postgresql-cnpg-io-v1-OptionSpecValue} + + +**Appears in:** + +- [OptionSpec](#postgresql-cnpg-io-v1-OptionSpec) + + +

OptionSpecValue holds the value and the ensure field for an option

+ + + + + + + + + + + + +
FieldDescription
value [Required]
+string +
+

Value of the option

+
ensure
+EnsureOption +
+

Specifies whether an option should be present or absent in +the database. If set to present, the option will be +created if it does not exist. If set to absent, the +option will be removed if it exists.

+
+ ## PasswordState {#postgresql-cnpg-io-v1-PasswordState} @@ -3360,7 +4195,7 @@ by pgbouncer

authQuerySecret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The credentials of the user that need to be used for the authentication @@ -3408,6 +4243,55 @@ the operator calls PgBouncer's PAUSE and RESUME comman +## PluginConfiguration {#postgresql-cnpg-io-v1-PluginConfiguration} + + +**Appears in:** + +- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) + +- [ExternalCluster](#postgresql-cnpg-io-v1-ExternalCluster) + + +

PluginConfiguration specifies a plugin that needs to be loaded for this +cluster to be reconciled

+ + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the plugin name

+
enabled
+bool +
+

Enabled is true if this plugin will be used

+
isWALArchiver
+bool +
+

Only one plugin can be declared as WALArchiver. +It cannot be active if the ".spec.backup.barmanObjectStore" configuration is present.

+
parameters
+map[string]string +
+

Parameters is the configuration of the plugin

+
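A sketch of a `plugins` entry combining these fields; the plugin name and its parameter are illustrative of a WAL-archiving plugin rather than prescribed values:

```yaml
plugins:
  - name: barman-cloud.cloudnative-pg.io
    isWALArchiver: true
    parameters:
      barmanObjectName: my-object-store
```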
+ ## PluginStatus {#postgresql-cnpg-io-v1-PluginStatus} @@ -3437,7 +4321,7 @@ the operator calls PgBouncer's PAUSE and RESUME comman latest reconciliation loop

-capabilities [Required]
+capabilities
[]string @@ -3445,7 +4329,7 @@ latest reconciliation loop

plugin

-operatorCapabilities [Required]
+operatorCapabilities
[]string @@ -3453,7 +4337,7 @@ plugin

plugin regarding the reconciler

-walCapabilities [Required]
+walCapabilities
[]string @@ -3461,7 +4345,7 @@ plugin regarding the reconciler

plugin regarding the WAL management

-backupCapabilities [Required]
+backupCapabilities
[]string @@ -3469,7 +4353,15 @@ plugin regarding the WAL management

plugin regarding the Backup management

-status [Required]
+restoreJobHookCapabilities
+[]string + + +

RestoreJobHookCapabilities are the list of capabilities of the +plugin regarding the RestoreJobHook management

+ + +status
string @@ -3658,7 +4550,7 @@ part for now.

FieldDescription cluster [Required]
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

This is the cluster reference on which the Pooler will work. @@ -3811,75 +4703,477 @@ to the pg_ident.conf file)

SyncReplicaElectionConstraints -

Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be -set up.

+

Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be +set up.

+ + +shared_preload_libraries
+[]string + + +

Lists of shared preload libraries to add to the default ones

+ + +ldap
+LDAPConfig + + +

Options to specify LDAP configuration

+ + +promotionTimeout
+int32 + + +

Specifies the maximum number of seconds to wait when promoting an instance to primary. +Default value is 40000000, greater than one year in seconds, +big enough to simulate an infinite timeout

+ + +enableAlterSystem
+bool + + +

If this parameter is true, the user will be able to invoke ALTER SYSTEM +on this CloudNativePG Cluster. +This should only be used for debugging and troubleshooting. +Defaults to false.

+ + +extensions
+[]ExtensionConfiguration + + +

The configuration of the extensions to be added

+ + + + + +## PrimaryUpdateMethod {#postgresql-cnpg-io-v1-PrimaryUpdateMethod} + +(Alias of `string`) + +**Appears in:** + +- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) + + +

PrimaryUpdateMethod contains the method to use when upgrading +the primary server of the cluster as part of rolling updates

+ + + + +## PrimaryUpdateStrategy {#postgresql-cnpg-io-v1-PrimaryUpdateStrategy} + +(Alias of `string`) + +**Appears in:** + +- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) + + +

PrimaryUpdateStrategy contains the strategy to follow when upgrading +the primary server of the cluster as part of rolling updates

+ + + + +## Probe {#postgresql-cnpg-io-v1-Probe} + + +**Appears in:** + +- [LivenessProbe](#postgresql-cnpg-io-v1-LivenessProbe) + +- [ProbeWithStrategy](#postgresql-cnpg-io-v1-ProbeWithStrategy) + + +

Probe describes a health check to be performed against a container to determine whether it is +alive or ready to receive traffic.

+ + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
initialDelaySeconds
+int32 +
+

Number of seconds after the container has started before liveness probes are initiated. +More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

+
timeoutSeconds
+int32 +
+

Number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. +More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

+
periodSeconds
+int32 +
+

How often (in seconds) to perform the probe. +Default to 10 seconds. Minimum value is 1.

+
successThreshold
+int32 +
+

Minimum consecutive successes for the probe to be considered successful after having failed. +Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.

+
failureThreshold
+int32 +
+

Minimum consecutive failures for the probe to be considered failed after having succeeded. +Defaults to 3. Minimum value is 1.

+
terminationGracePeriodSeconds
+int64 +
+

Optional duration in seconds the pod needs to terminate gracefully upon probe failure. +The grace period is the duration in seconds after the processes running in the pod are sent +a termination signal and the time when the processes are forcibly halted with a kill signal. +Set this value longer than the expected cleanup time for your process. +If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this +value overrides the value provided by the pod spec. +Value must be non-negative integer. The value zero indicates stop immediately via +the kill signal (no opportunity to shut down). +This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. +Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.

+
+ +## ProbeStrategyType {#postgresql-cnpg-io-v1-ProbeStrategyType} + +(Alias of `string`) + +**Appears in:** + +- [ProbeWithStrategy](#postgresql-cnpg-io-v1-ProbeWithStrategy) + + +

ProbeStrategyType is the type of the strategy used to declare a PostgreSQL instance +ready

+ + + + +## ProbeWithStrategy {#postgresql-cnpg-io-v1-ProbeWithStrategy} + + +**Appears in:** + +- [ProbesConfiguration](#postgresql-cnpg-io-v1-ProbesConfiguration) + + +

ProbeWithStrategy is the configuration of the startup probe

+ + + + + + + + + + + + + + + +
FieldDescription
Probe
+Probe +
(Members of Probe are embedded into this type.) +

Probe is the standard probe configuration

+
type
+ProbeStrategyType +
+

The probe strategy

+
maximumLag
+k8s.io/apimachinery/pkg/api/resource.Quantity +
+

Lag limit. Used only for streaming strategy

+
+ +## ProbesConfiguration {#postgresql-cnpg-io-v1-ProbesConfiguration} + + +**Appears in:** + +- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) + + +

ProbesConfiguration represents the configuration for the probes +to be injected in the PostgreSQL Pods

+ + + + + + + + + + + + + + + +
FieldDescription
startup [Required]
+ProbeWithStrategy +
+

The startup probe configuration

+
liveness [Required]
+LivenessProbe +
+

The liveness probe configuration

+
readiness [Required]
+ProbeWithStrategy +
+

The readiness probe configuration

+
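A sketch of how these probe settings compose in a cluster spec; the `streaming` readiness strategy and the lag threshold are illustrative, and `maximumLag` only applies to that strategy, as noted above:

```yaml
probes:
  readiness:
    type: streaming
    maximumLag: 32Mi
  liveness:
    isolationCheck:
      enabled: true
      requestTimeout: 1000
      connectionTimeout: 1000
```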
+ +## PublicationReclaimPolicy {#postgresql-cnpg-io-v1-PublicationReclaimPolicy} + +(Alias of `string`) + +**Appears in:** + +- [PublicationSpec](#postgresql-cnpg-io-v1-PublicationSpec) + + +

PublicationReclaimPolicy defines a policy for end-of-life maintenance of Publications.

+ + + + +## PublicationSpec {#postgresql-cnpg-io-v1-PublicationSpec} + + +**Appears in:** + +- [Publication](#postgresql-cnpg-io-v1-Publication) + + +

PublicationSpec defines the desired state of Publication

+ + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+core/v1.LocalObjectReference +
+

The name of the PostgreSQL cluster that identifies the "publisher"

+
name [Required]
+string +
+

The name of the publication inside PostgreSQL

+
dbname [Required]
+string +
+

The name of the database where the publication will be installed in +the "publisher" cluster

+
parameters
+map[string]string +
+

Publication parameters that form the WITH clause, as expected by +the PostgreSQL CREATE PUBLICATION command

+
target [Required]
+PublicationTarget +
+

Target of the publication as expected by PostgreSQL CREATE PUBLICATION command

+
publicationReclaimPolicy
+PublicationReclaimPolicy +
+

The policy for end-of-life maintenance of this publication

+
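A hypothetical manifest combining these fields; cluster, database, and publication names are illustrative:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Publication
metadata:
  name: publisher
spec:
  cluster:
    name: cluster-example
  name: pub
  dbname: app
  target:
    allTables: true
```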
+ +## PublicationStatus {#postgresql-cnpg-io-v1-PublicationStatus} + + +**Appears in:** + +- [Publication](#postgresql-cnpg-io-v1-Publication) + + +

PublicationStatus defines the observed state of Publication

+ + + + + + + - - - +
FieldDescription
observedGeneration
+int64 +
+

A sequence number representing the latest +desired state that was synchronized

shared_preload_libraries
-[]string +
applied
+bool
-

Lists of shared preload libraries to add to the default ones

+

Applied is true if the publication was reconciled correctly

ldap
-LDAPConfig +
message
+string
-

Options to specify LDAP configuration

+

Message is the reconciliation output message

promotionTimeout
-int32 +
+ +## PublicationTarget {#postgresql-cnpg-io-v1-PublicationTarget} + + +**Appears in:** + +- [PublicationSpec](#postgresql-cnpg-io-v1-PublicationSpec) + + +

PublicationTarget is what this publication should publish

+ + + + + + -
FieldDescription
allTables
+bool
-

Specifies the maximum number of seconds to wait when promoting an instance to primary. -Default value is 40000000, greater than one year in seconds, -big enough to simulate an infinite timeout

+

Marks the publication as one that replicates changes for all tables +in the database, including tables created in the future. +Corresponding to FOR ALL TABLES in PostgreSQL.

enableAlterSystem
-bool +
objects
+[]PublicationTargetObject
-

If this parameter is true, the user will be able to invoke ALTER SYSTEM -on this CloudNativePG Cluster. -This should only be used for debugging and troubleshooting. -Defaults to false.

+

Just the following schema objects

-## PrimaryUpdateMethod {#postgresql-cnpg-io-v1-PrimaryUpdateMethod} +## PublicationTargetObject {#postgresql-cnpg-io-v1-PublicationTargetObject} -(Alias of `string`) **Appears in:** -- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) - +- [PublicationTarget](#postgresql-cnpg-io-v1-PublicationTarget) -

PrimaryUpdateMethod contains the method to use when upgrading -the primary server of the cluster as part of rolling updates

+

PublicationTargetObject is an object to publish

+ + + + + + + + + + +
FieldDescription
tablesInSchema
+string +
+

Marks the publication as one that replicates changes for all tables +in the specified list of schemas, including tables created in the +future. Corresponding to FOR TABLES IN SCHEMA in PostgreSQL.

+
table
+PublicationTargetTable +
+

Specifies a list of tables to add to the publication. Corresponding +to FOR TABLE in PostgreSQL.

+
-## PrimaryUpdateStrategy {#postgresql-cnpg-io-v1-PrimaryUpdateStrategy} +## PublicationTargetTable {#postgresql-cnpg-io-v1-PublicationTargetTable} -(Alias of `string`) **Appears in:** -- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) - +- [PublicationTargetObject](#postgresql-cnpg-io-v1-PublicationTargetObject) -

PrimaryUpdateStrategy contains the strategy to follow when upgrading -the primary server of the cluster as part of rolling updates

+

PublicationTargetTable is a table to publish

+ + + + + + + + + + + + + + + + +
FieldDescription
only
+bool +
+

Whether to limit to the table only or include all its descendants

+
name [Required]
+string +
+

The table name

+
schema
+string +
+

The schema name

+
columns
+[]string +
+

The columns to publish

+
## RecoveryTarget {#postgresql-cnpg-io-v1-RecoveryTarget} @@ -3975,7 +5269,7 @@ cluster

- - - - - + + +
FieldDescription
self [Required]
+
self
string
@@ -3983,7 +5277,7 @@ cluster

or a replica cluster, comparing it with primary

primary [Required]
+
primary
string
@@ -3998,7 +5292,7 @@ topology specified in externalClusters

The name of the external cluster which is the replication origin

enabled [Required]
+
enabled
bool
@@ -4008,7 +5302,7 @@ object store or via streaming through pg_basebackup. Refer to the Replica clusters page of the documentation for more information.

promotionToken [Required]
+
promotionToken
string
@@ -4016,7 +5310,7 @@ Refer to the Replica clusters page of the documentation for more information.

minApplyDelay [Required]
+
minApplyDelay
meta/v1.Duration
@@ -4113,6 +5407,19 @@ It may only contain lower case letters, numbers, and the underscore character. This can only be set at creation time. By default set to _cnpg_.

synchronizeLogicalDecoding
+bool +
+

When enabled, the operator automatically manages synchronization of logical +decoding (replication) slots across high-availability clusters.

+

Requires one of the following conditions:

+
    +
- PostgreSQL version 17 or later
- PostgreSQL version < 17 with the pg_failover_slots extension enabled
+
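As an illustrative fragment, assuming the field sits alongside the high-availability slot settings shown above:

```yaml
replicationSlots:
  highAvailability:
    enabled: true
    synchronizeLogicalDecoding: true
```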
@@ -4156,7 +5463,7 @@ Reference: https://www.postgresql.org/docs/current/sql-createrole.html

passwordSecret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

Secret containing the password of the role (if present) @@ -4283,14 +5590,14 @@ in their respective arrays.

FieldDescription secretRefs
-[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector

SecretRefs holds a list of references to Secrets

configMapRefs
-[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector +[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector

ConfigMapRefs holds a list of references to ConfigMaps

@@ -4337,7 +5644,7 @@ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format

cluster [Required]
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The cluster to backup

@@ -4440,6 +5747,39 @@ Overrides the default settings specified in the cluster '.backup.volumeSnapshot. +## SchemaSpec {#postgresql-cnpg-io-v1-SchemaSpec} + + +**Appears in:** + +- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) + + +

SchemaSpec configures a schema in a database

+ + + + + + + + + + + + +
FieldDescription
DatabaseObjectSpec
+DatabaseObjectSpec +
(Members of DatabaseObjectSpec are embedded into this type.) +

Common fields

+
owner [Required]
+string +
+

The role name of the user who owns the schema inside PostgreSQL. +It maps to the AUTHORIZATION parameter of CREATE SCHEMA and the +OWNER TO command of ALTER SCHEMA.

+
+ ## SecretVersion {#postgresql-cnpg-io-v1-SecretVersion} @@ -4743,6 +6083,139 @@ Size cannot be decreased.

+## SubscriptionReclaimPolicy {#postgresql-cnpg-io-v1-SubscriptionReclaimPolicy} + +(Alias of `string`) + +**Appears in:** + +- [SubscriptionSpec](#postgresql-cnpg-io-v1-SubscriptionSpec) + + +

SubscriptionReclaimPolicy describes a policy for end-of-life maintenance of Subscriptions.

+ + + + +## SubscriptionSpec {#postgresql-cnpg-io-v1-SubscriptionSpec} + + +**Appears in:** + +- [Subscription](#postgresql-cnpg-io-v1-Subscription) + + +

SubscriptionSpec defines the desired state of Subscription

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+core/v1.LocalObjectReference +
+

The name of the PostgreSQL cluster that identifies the "subscriber"

+
name [Required]
+string +
+

The name of the subscription inside PostgreSQL

+
dbname [Required]
+string +
+

The name of the database where the subscription will be installed in +the "subscriber" cluster

+
parameters
+map[string]string +
+

Subscription parameters included in the WITH clause of the PostgreSQL +CREATE SUBSCRIPTION command. Most parameters cannot be changed +after the subscription is created and will be ignored if modified +later, except for a limited set documented at: +https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET

+
publicationName [Required]
+string +
+

The name of the publication inside the PostgreSQL database in the +"publisher"

+
publicationDBName
+string +
+

The name of the database containing the publication on the external +cluster. Defaults to the one in the external cluster definition.

+
externalClusterName [Required]
+string +
+

The name of the external cluster with the publication ("publisher")

+
subscriptionReclaimPolicy
+SubscriptionReclaimPolicy +
+

The policy for end-of-life maintenance of this subscription

+
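A hypothetical manifest combining these fields; all names are illustrative, and `externalClusterName` must match an entry in the subscriber cluster's `externalClusters` section:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Subscription
metadata:
  name: subscriber
spec:
  cluster:
    name: cluster-dest
  name: sub
  dbname: app
  publicationName: pub
  externalClusterName: cluster-example
```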
+ +## SubscriptionStatus {#postgresql-cnpg-io-v1-SubscriptionStatus} + + +**Appears in:** + +- [Subscription](#postgresql-cnpg-io-v1-Subscription) + + +

SubscriptionStatus defines the observed state of Subscription

+ + + + + + + + + + + + + + + +
FieldDescription
observedGeneration
+int64 +
+

A sequence number representing the latest +desired state that was synchronized

+
applied
+bool +
+

Applied is true if the subscription was reconciled correctly

+
message
+string +
+

Message is the reconciliation output message

+
+ ## SwitchReplicaClusterStatus {#postgresql-cnpg-io-v1-SwitchReplicaClusterStatus} @@ -4830,12 +6303,6 @@ physical replication slots

List of regular expression patterns to match the names of replication slots to be excluded (by default empty)

-- [Required]
-synchronizeReplicasCache - - - No description provided. - @@ -4900,6 +6367,20 @@ only useful for priority-based synchronous replication).

only useful for priority-based synchronous replication).

+dataDurability
+DataDurabilityLevel + + +

If set to "required", data durability is strictly enforced. Write operations +with synchronous commit settings (on, remote_write, or remote_apply) will +block if there are insufficient healthy replicas, ensuring data persistence. +If set to "preferred", data durability is maintained when healthy replicas +are available, but the required number of instances will adjust dynamically +if replicas become unavailable. This setting relaxes strict durability enforcement +to allow for operational continuity. This setting is only applicable if both +standbyNamesPre and standbyNamesPost are unset (empty).

+ + @@ -5069,6 +6550,37 @@ in synchronous replica election in case of failures
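As an illustrative fragment, assuming the `method` and `number` fields of the same synchronous configuration (elided above), relaxed enforcement could be expressed as:

```yaml
postgresql:
  synchronous:
    method: any
    number: 1
    dataDurability: preferred
```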

+## UsageSpec {#postgresql-cnpg-io-v1-UsageSpec} + + +**Appears in:** + +- [FDWSpec](#postgresql-cnpg-io-v1-FDWSpec) + + +

UsageSpec configures a USAGE privilege grant for a foreign data wrapper

+ + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name of the usage

+
type
+string +
+

The type of usage

+
+ ## VolumeSnapshotConfiguration {#postgresql-cnpg-io-v1-VolumeSnapshotConfiguration} diff --git a/docs/src/cluster_conf.md b/docs/src/cluster_conf.md index 43ed25b3fd..2ecbf0f1ad 100644 --- a/docs/src/cluster_conf.md +++ b/docs/src/cluster_conf.md @@ -1,4 +1,5 @@ # Instance pod configuration + ## Projected volumes @@ -47,7 +48,7 @@ CloudNativePG relies on [ephemeral volumes](https://kubernetes.io/docs/concepts/ for part of the internal activities. Ephemeral volumes exist for the sole duration of a pod's life, without persisting across pod restarts. -# Volume Claim Template for Temporary Storage +### Volume Claim Template for Temporary Storage The operator uses by default an `emptyDir` volume, which can be customized by using the `.spec.ephemeralVolumesSizeLimit field`. This can be overridden by specifying a volume claim template in the `.spec.ephemeralVolumeSource` field. diff --git a/docs/src/cncf-projects/cilium.md b/docs/src/cncf-projects/cilium.md new file mode 100644 index 0000000000..909a365e7d --- /dev/null +++ b/docs/src/cncf-projects/cilium.md @@ -0,0 +1,260 @@ +# Cilium + +## About + +[Cilium](https://cilium.io/) is a CNCF Graduated project that was accepted as +an Incubating project in 2021 and graduated in 2023. It was originally created +by Isovalent. It is an advanced networking, security, and observability +solution for cloud native environments, built on top of +[eBPF](https://ebpf.io/) technology. Cilium manages network traffic in +Kubernetes clusters by dynamically injecting eBPF programs into the Linux +Kernel, enabling low-latency, high-performance communication, and enforcing +fine-grained security policies. + +Key features of Cilium: + +- Advanced L3-L7 security policies for fine-grained network traffic control +- Efficient, kernel-level traffic management via eBPF +- Service Mesh integration (Cilium Service Mesh) +- Support for both Kubernetes NetworkPolicy and CiliumNetworkPolicy +- Built-in observability and monitoring with Hubble + +To install Cilium in your environment, follow the instructions in the documentation: +[https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/) + +## Pod-to-Pod Network Security with CloudNativePG and Cilium + +Kubernetes’ default behavior is to allow traffic between any two Pods in the cluster network. +Cilium provides advanced L3/L4 network security using the `CiliumNetworkPolicy` resource. This +enables fine-grained control over network traffic between Pods within a Kubernetes cluster. It is +especially useful for securing communication between application workloads and backend +services. + +In the following examples, we demonstrate how Cilium can be used to secure a +CloudNativePG PostgreSQL instance by restricting ingress traffic to only +authorized Pods. + +!!! Important + Before proceeding, ensure that the `cluster-example` Postgres cluster is up + and running in your environment. + +## Default Deny Behavior in Cilium + +By default, Cilium does **not** deny all traffic unless explicitly configured +to do so. In contrast to Kubernetes NetworkPolicy, which uses a deny-by-default +model once a policy is present in a namespace, Cilium provides more flexible +control over default deny behavior. + +To enforce a default deny posture with Cilium, you need to explicitly create a +policy that denies all traffic to a set of Pods unless otherwise allowed. 
This +is commonly achieved by using an **empty `ingress` section** in combination +with `endpointSelector`, or by enabling **`--enable-default-deny`** at the +Cilium agent level for broader enforcement. + +A minimal example of a default deny policy: + +```yaml +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: default-deny + namespace: default +spec: + description: "Default deny all ingress traffic to all Pods in this namespace" + endpointSelector: {} + ingress: [] +``` + +## Making Cilium Network Policies work with CloudNativePG Operator + +When working with a network policy, Cilium or not, the first step is to make +sure that the operator can reach the Pods in the target namespace. This is +important because the operator needs to be able to perform checks and actions +on the Pods, and one of those actions requires access to the port `8000` on the +Pods to get the current status of the PostgreSQL instance running inside. + +The following `CiliumNetworkPolicy` allows the operator to access the Pods in +the target `default` namespace: + +```yaml +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: cnpg-operator-access + namespace: default +spec: + description: "Allow CloudNativePG operator access to any pod in the target namespace" + endpointSelector: {} + ingress: + - fromEndpoints: + - matchLabels: + io.kubernetes.pod.namespace: cnpg-system + toPorts: + - ports: + - port: "8000" + protocol: TCP +``` +!!! Important + The `cnpg-system` namespace is the default namespace for the operator when + using the YAML manifests. If the operator was installed using a different + process (Helm, OLM, etc.), the namespace may be different. Make sure to adjust + the namespace properly. + +## Allowing access between cluster Pods + +Since the default policy is "deny all", we need to explicitly allow access +between the cluster Pods in the same namespace. We will improve our previous +policy by adding the required ingress rule: + +```yaml +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: cnpg-cluster-internal-access + namespace: default +spec: + description: "Allow CloudNativePG operator access and connection between pods in the same namespace" + endpointSelector: {} + ingress: + - fromEndpoints: + - matchLabels: + io.kubernetes.pod.namespace: cnpg-system + - matchLabels: + io.kubernetes.pod.namespace: default + cnpg.io/cluster: cluster-example + toPorts: + - ports: + - port: "8000" + protocol: TCP + - port: "5432" + protocol: TCP +``` + +The policy allows access from `cnpg-system` Pods and from `default` namespace +Pods that also belong to `cluster-example`. The `matchLabels` selector requires +Pods to have the complete set of listed labels. Missing even one label means +the Pod will not match. + +## Restricting Access to PostgreSQL with Cilium + +In this example, we define a `CiliumNetworkPolicy` that allows only Pods +labeled `role=backend` in the `default` namespace to connect to a PostgreSQL +cluster named `cluster-example`. All other ingress traffic is blocked by +default. 
+ +```yaml +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: postgres-access-backend-label + namespace: default +spec: + description: "Allow PostgreSQL access on port 5432 from Pods with role=backend" + endpointSelector: + matchLabels: + cnpg.io/cluster: cluster-example + ingress: + - fromEndpoints: + - matchLabels: + role: backend + toPorts: + - ports: + - port: "5432" + protocol: TCP +``` + +This `CiliumNetworkPolicy` ensures that only Pods labeled with `role=backend` +can access the PostgreSQL instance managed by CloudNativePG via port 5432 in +the `default` namespace. + +In the following policy, we demonstrate how to allow ingress traffic to port +5432 of a PostgreSQL cluster named `cluster-example`, only from Pods with the +label `role=backend` in any namespace. + +```yaml +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: postgres-access-backend-any-ns + namespace: default +spec: + description: "Allow PostgreSQL access on port 5432 from Pods with role=backend in any namespace" + endpointSelector: + matchLabels: + cnpg.io/cluster: cluster-example + ingress: + - fromEndpoints: + - labelSelector: + matchLabels: + role: backend + matchExpressions: + - key: io.kubernetes.pod.namespace + operator: Exists + toPorts: + - ports: + - port: "5432" + protocol: TCP +``` + +The following example allows ingress traffic to port 5432 of the +`cluster-example` cluster (located in the `default` namespace) from any Pods in +the `backend` namespace. + +```yaml +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: postgres-access-backend-namespace + namespace: default +spec: + description: "Allow PostgreSQL access on port 5432 from any Pods in the backend namespace" + endpointSelector: + matchLabels: + cnpg.io/cluster: cluster-example + ingress: + - fromEndpoints: + - matchLabels: + io.kubernetes.pod.namespace: backend + toPorts: + - ports: + - port: "5432" + protocol: TCP +``` + +Using Cilium’s L3/L4 policy model, we define a `CiliumNetworkPolicy` that +explicitly allows ingress traffic to cluster Pods only from application Pods in +the `backend` namespace. All other traffic is implicitly denied unless +explicitly permitted by additional policies. + +The following example allows ingress traffic to port 5432 of the +`cluster-example` cluster (located in the `default` namespace) from any source +within the Kubernetes cluster. + +```yaml +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: postgres-access-cluster-wide + namespace: default +spec: + description: "Allow ingress traffic to port 5432 of the cluster-example from any pods within the Kubernetes cluster" + endpointSelector: + matchLabels: + cnpg.io/cluster: cluster-example + ingress: + - fromEntities: + - cluster + toPorts: + - ports: + - port: "5432" + protocol: TCP +``` + +You may consider using [editor.networkpolicy.io](https://editor.networkpolicy.io/), +a visual and interactive tool that simplifies the creation and validation of +Cilium Network Policies. It’s especially helpful for avoiding misconfigurations +and understanding traffic rules more clearly by presenting in a visual way. + +With these policies, you've established baseline access controls for +PostgreSQL. You can layer additional egress or audit rules using Cilium's +policy language or extend to L7 enforcement with Envoy. 
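As a starting point for such egress rules, the following sketch pins `role=backend` Pods to the PostgreSQL port only; adjust the selectors to your environment:

```yaml
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: backend-egress-to-postgres
  namespace: default
spec:
  description: "Allow backend Pods egress only to cluster-example on port 5432"
  endpointSelector:
    matchLabels:
      role: backend
  egress:
    - toEndpoints:
        - matchLabels:
            cnpg.io/cluster: cluster-example
      toPorts:
        - ports:
            - port: "5432"
              protocol: TCP
```

Keep in mind that once an `egress` section is present, all other egress from the selected Pods is denied, so real deployments typically also allow DNS resolution.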
diff --git a/docs/src/cncf-projects/external-secrets.md b/docs/src/cncf-projects/external-secrets.md
new file mode 100644
index 0000000000..1fd2c840b4
--- /dev/null
+++ b/docs/src/cncf-projects/external-secrets.md
@@ -0,0 +1,265 @@
+# External Secrets
+
+[External Secrets](https://external-secrets.io/latest/) is a CNCF Sandbox
+project, accepted in 2022 under the sponsorship of TAG Security.
+
+## About
+
+The **External Secrets Operator (ESO)** is a Kubernetes operator that enhances
+secret management by decoupling the storage of secrets from Kubernetes itself.
+It enables seamless synchronization between external secret management systems
+and native Kubernetes `Secret` resources.
+
+ESO supports a wide range of backends, including:
+
+- [HashiCorp Vault](https://www.vaultproject.io/)
+- [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/)
+- [Google Secret Manager](https://cloud.google.com/secret-manager)
+- [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/)
+- [IBM Cloud Secrets Manager](https://www.ibm.com/cloud/secrets-manager)
+
+…and many more. For a full and up-to-date list of supported providers, refer to
+the [official External Secrets documentation](https://external-secrets.io/latest/).
+
+## Integration with PostgreSQL and CloudNativePG
+
+When it comes to PostgreSQL databases, External Secrets integrates seamlessly
+with [CloudNativePG](https://cloudnative-pg.io/) in two major use cases:
+
+- **Automated password management:** ESO can handle the automatic generation
+  and rotation of database user passwords stored in Kubernetes `Secret`
+  resources, ensuring that applications running inside the cluster always have
+  access to up-to-date credentials.
+
+- **Cross-platform secret access:** It enables transparent synchronization of
+  those passwords with an external Key Management Service (KMS) via a
+  `SecretStore` resource. This allows applications and developers outside the
+  Kubernetes cluster—who may not have access to Kubernetes secrets—to retrieve
+  the database credentials directly from the external KMS.
+
+## Example: Automated Password Management with External Secrets
+
+Let’s walk through how to automatically rotate the password of the `app` user
+every 24 hours in the `cluster-example` Postgres cluster from the
+[quickstart guide](../quickstart.md#part-3-deploy-a-postgresql-cluster).
+
+!!! Important
+    Before proceeding, ensure that the `cluster-example` Postgres cluster is up
+    and running in your environment.
+
+By default, CloudNativePG generates and manages a Kubernetes `Secret` named
+`cluster-example-app`, which contains the credentials for the `app` user in the
+`cluster-example` cluster. You can read more about this in the
+[“Connecting from an application” section](../applications.md#secrets).
+
+With External Secrets, the goal is to:
+
+1. Define a `Password` generator that specifies how to generate the password.
+2. Create an `ExternalSecret` resource that keeps the `cluster-example-app`
+   secret in sync by updating only the password-related fields (`password`,
+   `pgpass`, `jdbc-uri`, and `uri`).
+
+### Creating the Password Generator
+
+The following example creates a
+[`Password` generator](https://external-secrets.io/main/api/generator/password/)
+resource named `pg-password-generator` in the `default` namespace.
You can
+customize the name and properties to suit your needs:
+
+```yaml
+apiVersion: generators.external-secrets.io/v1alpha1
+kind: Password
+metadata:
+  name: pg-password-generator
+spec:
+  length: 42
+  digits: 5
+  symbols: 5
+  symbolCharacters: "-_$@"
+  noUpper: false
+  allowRepeat: true
+```
+
+This specification defines the characteristics of the generated password,
+including its length and the inclusion of digits, symbols, and uppercase
+letters.
+
+### Creating the External Secret
+
+The example below creates an `ExternalSecret` resource named
+`cluster-example-app-secret`, which refreshes the password every 24 hours. It
+uses a `Merge` policy to update only the specified fields (`password`, `pgpass`,
+`jdbc-uri`, and `uri`) in the `cluster-example-app` secret.
+
+```yaml
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: cluster-example-app-secret
+spec:
+  refreshInterval: "24h"
+  target:
+    name: cluster-example-app
+    creationPolicy: Merge
+    template:
+      metadata:
+        labels:
+          cnpg.io/reload: "true"
+      data:
+        password: "{{ .password }}"
+        pgpass: "cluster-example-rw:5432:app:app:{{ .password }}"
+        jdbc-uri: "jdbc:postgresql://cluster-example-rw.default:5432/app?password={{ .password }}&user=app"
+        uri: "postgresql://app:{{ .password }}@cluster-example-rw.default:5432/app"
+  dataFrom:
+    - sourceRef:
+        generatorRef:
+          apiVersion: generators.external-secrets.io/v1alpha1
+          kind: Password
+          name: pg-password-generator
+```
+
+The label `cnpg.io/reload: "true"` ensures that CloudNativePG triggers a reload
+of the user password in the database when the secret changes.
+
+### Verifying the Configuration
+
+To check that the `ExternalSecret` is correctly synchronizing:
+
+```sh
+kubectl get es cluster-example-app-secret
+```
+
+To observe the password being refreshed in real time, temporarily reduce the
+`refreshInterval` to `30s` and run the following command repeatedly:
+
+```sh
+kubectl get secret cluster-example-app \
+  -o jsonpath="{.data.password}" | base64 -d
+```
+
+You should see the password change every 30 seconds, confirming that the
+rotation is working correctly.
+
+### There's More
+
+While the example above focuses on the default `cluster-example-app` secret
+created by CloudNativePG, the same approach can be extended to any custom
+secrets or PostgreSQL users you create, so that their passwords are rotated
+on a regular basis.
+
+
+## Example: Integration with an External KMS
+
+One of the most widely used Key Management Service (KMS) providers in the CNCF
+ecosystem is [HashiCorp Vault](https://www.vaultproject.io/). Although Vault is
+licensed under the Business Source License (BUSL), a fully compatible and
+actively maintained open source alternative is available: [OpenBao](https://openbao.org/).
+OpenBao supports all the same interfaces as HashiCorp Vault, making it a true
+drop-in replacement.
+
+In this example, we'll demonstrate how to integrate CloudNativePG,
+External Secrets Operator, and HashiCorp Vault to automatically rotate
+a PostgreSQL password and securely store it in Vault.
+
+!!! Important
+    This example assumes that HashiCorp Vault is already installed and properly
+    configured in your environment, and that your team has the necessary expertise
+    to operate it. There are various ways to deploy Vault, and detailing them is
+    outside the scope of CloudNativePG. While it's possible to run Vault inside
+    Kubernetes, it is more commonly deployed externally.
For detailed instructions,
+    consult the [HashiCorp Vault documentation](https://www.vaultproject.io/docs).
+
+Continuing from the previous example, we will now create the necessary
+`SecretStore` and `PushSecret` resources to complete the integration with
+Vault.
+
+### Creating the `SecretStore`
+
+In this example, we assume that HashiCorp Vault is accessible from within the
+namespace at `http://vault.vault.svc:8200`, and that a Kubernetes `Secret`
+named `vault-token` exists in the same namespace, containing the token used to
+authenticate with Vault.
+
+```yaml
+apiVersion: external-secrets.io/v1beta1
+kind: SecretStore
+metadata:
+  name: vault-backend
+spec:
+  provider:
+    vault:
+      server: "http://vault.vault.svc:8200"
+      path: "secrets"
+      # Specifies the Vault KV secret engine version ("v1" or "v2").
+      # Defaults to "v2" if not set.
+      version: "v2"
+      auth:
+        # References a Kubernetes Secret that contains the Vault token.
+        # See: https://www.vaultproject.io/docs/auth/token
+        tokenSecretRef:
+          name: "vault-token"
+          key: "token"
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: vault-token
+data:
+  token: aHZzLioqKioqKio= # hvs.*******
+```
+
+This configuration creates a `SecretStore` resource named `vault-backend`.
+
+!!! Important
+    This example uses basic token-based authentication, which is suitable for
+    testing, API, and CLI use cases. While it is the default method enabled in
+    Vault, it is not recommended for production environments. For production,
+    consider using more secure authentication methods.
+    Refer to the [External Secrets Operator documentation](https://external-secrets.io/latest/provider/hashicorp-vault/)
+    for a full list of supported authentication mechanisms.
+
+!!! Info
+    HashiCorp Vault must have a KV secrets engine enabled at the `secrets` path
+    with version `v2`. If your Vault instance uses a different path or
+    version, be sure to update the `path` and `version` fields accordingly.
+
+### Creating the `PushSecret`
+
+The `PushSecret` resource is used to push a Kubernetes `Secret` to HashiCorp
+Vault. In this simplified example, we'll push the credentials for the `app`
+user of the sample cluster `cluster-example`.
+
+For more details on configuring `PushSecret`, refer to the
+[External Secrets Operator documentation](https://external-secrets.io/latest/api/pushsecret/).
+
+```yaml
+apiVersion: external-secrets.io/v1alpha1
+kind: PushSecret
+metadata:
+  name: pushsecret-example
+spec:
+  deletionPolicy: Delete
+  refreshInterval: 24h
+  secretStoreRefs:
+    - name: vault-backend
+      kind: SecretStore
+  selector:
+    secret:
+      name: cluster-example-app
+  data:
+    - match:
+        remoteRef:
+          remoteKey: cluster-example-app
+```
+
+In this example, the `PushSecret` resource instructs the External Secrets
+Operator to push the Kubernetes `Secret` named `cluster-example-app` to
+HashiCorp Vault (from the previous example). The `remoteKey` defines the name
+under which the secret will be stored in Vault, using the `SecretStore` named
+`vault-backend`.
+
+### Verifying the Configuration
+
+To verify that the `PushSecret` is functioning correctly, navigate to the
+HashiCorp Vault UI. In the `kv` secrets engine at the path `secrets`, you
+should find a secret named `cluster-example-app`, corresponding to the
+`remoteKey` defined above.
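+
+If you prefer the command line, you can run a quick check with the Vault CLI
+as well. This is a minimal sketch that assumes the `vault` CLI is installed,
+authenticated, and pointed at your server, with the KV v2 engine mounted at
+`secrets`:
+
+```sh
+# Read the pushed secret from the KV v2 engine mounted at "secrets"
+vault kv get secrets/cluster-example-app
+
+# Print only the password field
+vault kv get -field=password secrets/cluster-example-app
+```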
diff --git a/docs/src/cnpg_i.md b/docs/src/cnpg_i.md
new file mode 100644
index 0000000000..d75522a7fc
--- /dev/null
+++ b/docs/src/cnpg_i.md
@@ -0,0 +1,206 @@
+# CNPG-I
+
+
+The **CloudNativePG Interface** ([CNPG-I](https://github.com/cloudnative-pg/cnpg-i))
+is a standard way to extend and customize CloudNativePG without modifying its
+core codebase.
+
+## Why CNPG-I?
+
+CloudNativePG supports a wide range of use cases, but sometimes its built-in
+functionality isn’t enough, or adding certain features directly to the main
+project isn’t practical.
+
+Before CNPG-I, users had two main options:
+
+- Fork the project to add custom behavior, or
+- Extend the upstream codebase by writing custom components on top of it.
+
+Both approaches created maintenance overhead, slowed upgrades, and delayed
+delivery of critical features.
+
+CNPG-I solves these problems by providing a stable, gRPC-based integration
+point for extending CloudNativePG at key points in a cluster’s lifecycle, such
+as backups, recovery, and sub-resource reconciliation, without disrupting the
+core project.
+
+CNPG-I can extend:
+
+- The operator, and/or
+- The instance manager running inside PostgreSQL pods.
+
+## Registering a plugin
+
+CNPG-I is inspired by the Kubernetes
+[Container Storage Interface (CSI)](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/).
+The operator communicates with registered plugins using **gRPC**, following the
+[CNPG-I protocol](https://github.com/cloudnative-pg/cnpg-i/blob/main/docs/protocol.md).
+
+CloudNativePG discovers plugins **at startup**. You can register them in one of two ways:
+
+- Sidecar container – run the plugin inside the operator’s Deployment
+- Standalone Deployment – run the plugin as a separate workload in the same
+  namespace
+
+In both cases, the plugin must be packaged as a container image.
+
+### Sidecar Container
+
+When running as a sidecar, the plugin must expose its gRPC server via a **Unix
+domain socket**. This socket must be placed in a directory shared with the
+operator container, mounted at the path set in `PLUGIN_SOCKET_DIR` (default:
+`/plugins`).
+
+Example:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: controller-manager
+spec:
+  template:
+    spec:
+      containers:
+        - image: cloudnative-pg:latest
+          [...]
+          name: manager
+          volumeMounts:
+            - mountPath: /plugins
+              name: cnpg-i-plugins
+
+        - image: cnpg-i-plugin-example:latest
+          name: cnpg-i-plugin-example
+          volumeMounts:
+            - mountPath: /plugins
+              name: cnpg-i-plugins
+      volumes:
+        - name: cnpg-i-plugins
+          emptyDir: {}
+```
+
+### Standalone Deployment (recommended)
+
+Running a plugin as its own Deployment decouples its lifecycle from the
+operator’s and allows independent scaling. In this setup, the plugin exposes a
+TCP gRPC endpoint behind a Service, with **mTLS** for secure communication.
+
+!!! Warning
+    CloudNativePG does **not** discover plugins dynamically. If you deploy a new
+    plugin, you must **restart the operator** to detect it.
+
+Example Deployment:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cnpg-i-plugin-example
+spec:
+  template:
+    [...]
+
+    spec:
+      containers:
+        - name: cnpg-i-plugin-example
+          image: cnpg-i-plugin-example:latest
+          ports:
+            - containerPort: 9090
+              protocol: TCP
+```
+
+The related Service for the plugin must include:
+
+- The label `cnpg.io/pluginName: <plugin name>` — required for CloudNativePG to
+  discover the plugin
+- The annotation `cnpg.io/pluginPort: <port>` — specifies the port where the
+  plugin’s gRPC server is exposed
+
+Example Service:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    cnpg.io/pluginPort: "9090"
+  labels:
+    cnpg.io/pluginName: cnpg-i-plugin-example.my-org.io
+  name: cnpg-i-plugin-example
+spec:
+  ports:
+    - port: 9090
+      protocol: TCP
+      targetPort: 9090
+  selector:
+    app: cnpg-i-plugin-example
+```
+
+### Configuring TLS Certificates
+
+When a plugin runs as a `Deployment`, communication with CloudNativePG happens
+over the network. To secure it, **mTLS is enforced**, requiring TLS
+certificates for both sides.
+
+Certificates must be stored as [Kubernetes TLS Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets)
+and referenced in the plugin’s Service annotations
+(`cnpg.io/pluginClientSecret` and `cnpg.io/pluginServerSecret`):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    cnpg.io/pluginClientSecret: cnpg-i-plugin-example-client-tls
+    cnpg.io/pluginServerSecret: cnpg-i-plugin-example-server-tls
+    cnpg.io/pluginPort: "9090"
+  name: barman-cloud
+  namespace: postgresql-operator-system
+spec:
+  [...]
+```
+
+!!! Note
+    You can provide your own certificate bundles, but the recommended method is
+    to use [Cert-manager](https://cert-manager.io).
+
+## Using a plugin
+
+To enable a plugin, configure the `.spec.plugins` section in your `Cluster`
+resource. Refer to the CloudNativePG API Reference for the full
+[PluginConfiguration](https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PluginConfiguration)
+specification.
+
+Example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-with-plugins
+spec:
+  instances: 1
+  storage:
+    size: 1Gi
+  plugins:
+    - name: cnpg-i-plugin-example.my-org.io
+      enabled: true
+      parameters:
+        key1: value1
+        key2: value2
+```
+
+Each plugin may have its own parameters—check the plugin’s documentation for
+details. The `name` field in `spec.plugins` depends on how the plugin is
+deployed:
+
+- Sidecar container: use the Unix socket file name
+- Deployment: use the value from the Service’s `cnpg.io/pluginName` label
+
+## Community plugins
+
+The CNPG-I protocol has quickly become a proven and reliable pattern for
+extending CloudNativePG while keeping the core project maintainable.
+Over time, the community has built and shared plugins that address real-world
+needs and serve as examples for developers.
+
+For a complete and up-to-date list of plugins built with CNPG-I, please refer to the
+[CNPG-I GitHub page](https://github.com/cloudnative-pg/cnpg-i?tab=readme-ov-file#projects-built-with-cnpg-i).
diff --git a/docs/src/commercial_support.md b/docs/src/commercial_support.md
deleted file mode 100644
index 1b9ca9da55..0000000000
--- a/docs/src/commercial_support.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Commercial support
-
-CloudNativePG is an independent open source project that doesn't endorse any
-company.
-
-The ["Support" page](https://cloudnative-pg.io/support/) website lists
-third-party companies and individuals that provide products or services related
-to CloudNativePG.
- -If you're providing commercial support for CloudNativePG, add yourself or your -organization to that list. - diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md index cf6b9bc310..57d961835c 100644 --- a/docs/src/connection_pooling.md +++ b/docs/src/connection_pooling.md @@ -1,4 +1,5 @@ # Connection pooling + CloudNativePG provides native support for connection pooling with [PgBouncer](https://www.pgbouncer.org/), one of the most popular open source @@ -331,13 +332,17 @@ are the ones directly set by PgBouncer. - [`application_name_add_host`](https://www.pgbouncer.org/config.html#application_name_add_host) - [`autodb_idle_timeout`](https://www.pgbouncer.org/config.html#autodb_idle_timeout) +- [`cancel_wait_timeout`](https://www.pgbouncer.org/config.html#cancel_wait_timeout) - [`client_idle_timeout`](https://www.pgbouncer.org/config.html#client_idle_timeout) - [`client_login_timeout`](https://www.pgbouncer.org/config.html#client_login_timeout) - [`default_pool_size`](https://www.pgbouncer.org/config.html#default_pool_size) - [`disable_pqexec`](https://www.pgbouncer.org/config.html#disable_pqexec) +- [`dns_max_ttl`](https://www.pgbouncer.org/config.html#dns_max_ttl) +- [`dns_nxdomain_ttl`](https://www.pgbouncer.org/config.html#dns_nxdomain_ttl) - [`idle_transaction_timeout`](https://www.pgbouncer.org/config.html#idle_transaction_timeout) - [`ignore_startup_parameters`](https://www.pgbouncer.org/config.html#ignore_startup_parameters): - to be appended to `extra_float_digits,options` - required by CNP + to be appended to `extra_float_digits,options` - required by CloudNativePG +- [`listen_backlog`](https://www.pgbouncer.org/config.html#listen_backlog) - [`log_connections`](https://www.pgbouncer.org/config.html#log_connections) - [`log_disconnections`](https://www.pgbouncer.org/config.html#log_disconnections) - [`log_pooler_errors`](https://www.pgbouncer.org/config.html#log_pooler_errors) @@ -346,13 +351,16 @@ are the ones directly set by PgBouncer. export as described in the ["Monitoring"](#monitoring) section below - [`max_client_conn`](https://www.pgbouncer.org/config.html#max_client_conn) - [`max_db_connections`](https://www.pgbouncer.org/config.html#max_db_connections) +- [`max_packet_size`](https://www.pgbouncer.org/config.html#max_packet_size) - [`max_prepared_statements`](https://www.pgbouncer.org/config.html#max_prepared_statements) - [`max_user_connections`](https://www.pgbouncer.org/config.html#max_user_connections) - [`min_pool_size`](https://www.pgbouncer.org/config.html#min_pool_size) +- [`pkt_buf`](https://www.pgbouncer.org/config.html#pkt_buf) - [`query_timeout`](https://www.pgbouncer.org/config.html#query_timeout) - [`query_wait_timeout`](https://www.pgbouncer.org/config.html#query_wait_timeout) - [`reserve_pool_size`](https://www.pgbouncer.org/config.html#reserve_pool_size) - [`reserve_pool_timeout`](https://www.pgbouncer.org/config.html#reserve_pool_timeout) +- [`sbuf_loopcnt`](https://www.pgbouncer.org/config.html#sbuf_loopcnt) - [`server_check_delay`](https://www.pgbouncer.org/config.html#server_check_delay) - [`server_check_query`](https://www.pgbouncer.org/config.html#server_check_query) - [`server_connect_timeout`](https://www.pgbouncer.org/config.html#server_connect_timeout) @@ -363,12 +371,18 @@ are the ones directly set by PgBouncer. 
- [`server_reset_query`](https://www.pgbouncer.org/config.html#server_reset_query) - [`server_reset_query_always`](https://www.pgbouncer.org/config.html#server_reset_query_always) - [`server_round_robin`](https://www.pgbouncer.org/config.html#server_round_robin) +- [`server_tls_ciphers`](https://www.pgbouncer.org/config.html#server_tls_ciphers) +- [`server_tls_protocols`](https://www.pgbouncer.org/config.html#server_tls_protocols) - [`stats_period`](https://www.pgbouncer.org/config.html#stats_period) +- [`suspend_timeout`](https://www.pgbouncer.org/config.html#suspend_timeout) +- [`tcp_defer_accept`](https://www.pgbouncer.org/config.html#tcp_defer_accept) - [`tcp_keepalive`](https://www.pgbouncer.org/config.html#tcp_keepalive) - [`tcp_keepcnt`](https://www.pgbouncer.org/config.html#tcp_keepcnt) - [`tcp_keepidle`](https://www.pgbouncer.org/config.html#tcp_keepidle) - [`tcp_keepintvl`](https://www.pgbouncer.org/config.html#tcp_keepintvl) - [`tcp_user_timeout`](https://www.pgbouncer.org/config.html#tcp_user_timeout) +- [`tcp_socket_buffer`](https://www.pgbouncer.org/config.html#tcp_socket_buffer) +- [`track_extra_parameters`](https://www.pgbouncer.org/config.html#track_extra_parameters) - [`verbose`](https://www.pgbouncer.org/config.html#verbose) Customizations of the PgBouncer configuration are written declaratively in the @@ -408,165 +422,165 @@ This example shows the output for `cnpg_pgbouncer` metrics: ```text # HELP cnpg_pgbouncer_collection_duration_seconds Collection time duration in seconds # TYPE cnpg_pgbouncer_collection_duration_seconds gauge -cnpg_pgbouncer_collection_duration_seconds{collector="Collect.up"} 0.002443168 - +cnpg_pgbouncer_collection_duration_seconds{collector="Collect.up"} 0.002338805 +# HELP cnpg_pgbouncer_collection_errors_total Total errors occurred accessing PostgreSQL for metrics. +# TYPE cnpg_pgbouncer_collection_errors_total counter +cnpg_pgbouncer_collection_errors_total{collector="sql: Scan error on column index 16, name \"load_balance_hosts\": converting NULL to int is unsupported"} 5 # HELP cnpg_pgbouncer_collections_total Total number of times PostgreSQL was accessed for metrics. # TYPE cnpg_pgbouncer_collections_total counter -cnpg_pgbouncer_collections_total 1 - +cnpg_pgbouncer_collections_total 5 # HELP cnpg_pgbouncer_last_collection_error 1 if the last collection ended with error, 0 otherwise. # TYPE cnpg_pgbouncer_last_collection_error gauge cnpg_pgbouncer_last_collection_error 0 - # HELP cnpg_pgbouncer_lists_databases Count of databases. # TYPE cnpg_pgbouncer_lists_databases gauge cnpg_pgbouncer_lists_databases 1 - # HELP cnpg_pgbouncer_lists_dns_names Count of DNS names in the cache. # TYPE cnpg_pgbouncer_lists_dns_names gauge cnpg_pgbouncer_lists_dns_names 0 - # HELP cnpg_pgbouncer_lists_dns_pending Not used. # TYPE cnpg_pgbouncer_lists_dns_pending gauge cnpg_pgbouncer_lists_dns_pending 0 - # HELP cnpg_pgbouncer_lists_dns_queries Count of in-flight DNS queries. # TYPE cnpg_pgbouncer_lists_dns_queries gauge cnpg_pgbouncer_lists_dns_queries 0 - # HELP cnpg_pgbouncer_lists_dns_zones Count of DNS zones in the cache. # TYPE cnpg_pgbouncer_lists_dns_zones gauge cnpg_pgbouncer_lists_dns_zones 0 - # HELP cnpg_pgbouncer_lists_free_clients Count of free clients. # TYPE cnpg_pgbouncer_lists_free_clients gauge cnpg_pgbouncer_lists_free_clients 49 - # HELP cnpg_pgbouncer_lists_free_servers Count of free servers. 
# TYPE cnpg_pgbouncer_lists_free_servers gauge cnpg_pgbouncer_lists_free_servers 0 - # HELP cnpg_pgbouncer_lists_login_clients Count of clients in login state. # TYPE cnpg_pgbouncer_lists_login_clients gauge cnpg_pgbouncer_lists_login_clients 0 - # HELP cnpg_pgbouncer_lists_pools Count of pools. # TYPE cnpg_pgbouncer_lists_pools gauge cnpg_pgbouncer_lists_pools 1 - # HELP cnpg_pgbouncer_lists_used_clients Count of used clients. # TYPE cnpg_pgbouncer_lists_used_clients gauge cnpg_pgbouncer_lists_used_clients 1 - # HELP cnpg_pgbouncer_lists_used_servers Count of used servers. # TYPE cnpg_pgbouncer_lists_used_servers gauge cnpg_pgbouncer_lists_used_servers 0 - # HELP cnpg_pgbouncer_lists_users Count of users. # TYPE cnpg_pgbouncer_lists_users gauge cnpg_pgbouncer_lists_users 2 - # HELP cnpg_pgbouncer_pools_cl_active Client connections that are linked to server connection and can process queries. # TYPE cnpg_pgbouncer_pools_cl_active gauge cnpg_pgbouncer_pools_cl_active{database="pgbouncer",user="pgbouncer"} 1 - +# HELP cnpg_pgbouncer_pools_cl_active_cancel_req Client connections that have forwarded query cancellations to the server and are waiting for the server response. +# TYPE cnpg_pgbouncer_pools_cl_active_cancel_req gauge +cnpg_pgbouncer_pools_cl_active_cancel_req{database="pgbouncer",user="pgbouncer"} 0 # HELP cnpg_pgbouncer_pools_cl_cancel_req Client connections that have not forwarded query cancellations to the server yet. # TYPE cnpg_pgbouncer_pools_cl_cancel_req gauge cnpg_pgbouncer_pools_cl_cancel_req{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_cl_waiting Client connections that have sent queries but have not yet got a server connection. # TYPE cnpg_pgbouncer_pools_cl_waiting gauge cnpg_pgbouncer_pools_cl_waiting{database="pgbouncer",user="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_pools_cl_waiting_cancel_req Client connections that have not forwarded query cancellations to the server yet. +# TYPE cnpg_pgbouncer_pools_cl_waiting_cancel_req gauge +cnpg_pgbouncer_pools_cl_waiting_cancel_req{database="pgbouncer",user="pgbouncer"} 0 +# HELP cnpg_pgbouncer_pools_load_balance_hosts Number of hosts not load balancing between hosts +# TYPE cnpg_pgbouncer_pools_load_balance_hosts gauge +cnpg_pgbouncer_pools_load_balance_hosts{database="pgbouncer",user="pgbouncer"} 0 # HELP cnpg_pgbouncer_pools_maxwait How long the first (oldest) client in the queue has waited, in seconds. If this starts increasing, then the current pool of servers does not handle requests quickly enough. The reason may be either an overloaded server or just too small of a pool_size setting. # TYPE cnpg_pgbouncer_pools_maxwait gauge cnpg_pgbouncer_pools_maxwait{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_maxwait_us Microsecond part of the maximum waiting time. # TYPE cnpg_pgbouncer_pools_maxwait_us gauge cnpg_pgbouncer_pools_maxwait_us{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_pool_mode The pooling mode in use. 1 for session, 2 for transaction, 3 for statement, -1 if unknown # TYPE cnpg_pgbouncer_pools_pool_mode gauge cnpg_pgbouncer_pools_pool_mode{database="pgbouncer",user="pgbouncer"} 3 - # HELP cnpg_pgbouncer_pools_sv_active Server connections that are linked to a client. 
# TYPE cnpg_pgbouncer_pools_sv_active gauge cnpg_pgbouncer_pools_sv_active{database="pgbouncer",user="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_pools_sv_active_cancel Server connections that are currently forwarding a cancel request +# TYPE cnpg_pgbouncer_pools_sv_active_cancel gauge +cnpg_pgbouncer_pools_sv_active_cancel{database="pgbouncer",user="pgbouncer"} 0 # HELP cnpg_pgbouncer_pools_sv_idle Server connections that are unused and immediately usable for client queries. # TYPE cnpg_pgbouncer_pools_sv_idle gauge cnpg_pgbouncer_pools_sv_idle{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_sv_login Server connections currently in the process of logging in. # TYPE cnpg_pgbouncer_pools_sv_login gauge cnpg_pgbouncer_pools_sv_login{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_sv_tested Server connections that are currently running either server_reset_query or server_check_query. # TYPE cnpg_pgbouncer_pools_sv_tested gauge cnpg_pgbouncer_pools_sv_tested{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_sv_used Server connections that have been idle for more than server_check_delay, so they need server_check_query to run on them before they can be used again. # TYPE cnpg_pgbouncer_pools_sv_used gauge cnpg_pgbouncer_pools_sv_used{database="pgbouncer",user="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_pools_sv_wait_cancels Servers that normally could become idle, but are waiting to do so until all in-flight cancel requests have completed that were sent to cancel a query on this server. +# TYPE cnpg_pgbouncer_pools_sv_wait_cancels gauge +cnpg_pgbouncer_pools_sv_wait_cancels{database="pgbouncer",user="pgbouncer"} 0 +# HELP cnpg_pgbouncer_stats_avg_bind_count Average number of prepared statements readied for execution by clients and forwarded to PostgreSQL by pgbouncer. +# TYPE cnpg_pgbouncer_stats_avg_bind_count gauge +cnpg_pgbouncer_stats_avg_bind_count{database="pgbouncer"} 0 +# HELP cnpg_pgbouncer_stats_avg_client_parse_count Average number of prepared statements created by clients. +# TYPE cnpg_pgbouncer_stats_avg_client_parse_count gauge +cnpg_pgbouncer_stats_avg_client_parse_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_avg_query_count Average queries per second in last stat period. # TYPE cnpg_pgbouncer_stats_avg_query_count gauge -cnpg_pgbouncer_stats_avg_query_count{database="pgbouncer"} 1 - +cnpg_pgbouncer_stats_avg_query_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_avg_query_time Average query duration, in microseconds. # TYPE cnpg_pgbouncer_stats_avg_query_time gauge cnpg_pgbouncer_stats_avg_query_time{database="pgbouncer"} 0 - # HELP cnpg_pgbouncer_stats_avg_recv Average received (from clients) bytes per second. # TYPE cnpg_pgbouncer_stats_avg_recv gauge cnpg_pgbouncer_stats_avg_recv{database="pgbouncer"} 0 - # HELP cnpg_pgbouncer_stats_avg_sent Average sent (to clients) bytes per second. # TYPE cnpg_pgbouncer_stats_avg_sent gauge cnpg_pgbouncer_stats_avg_sent{database="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_stats_avg_server_parse_count Average number of prepared statements created by pgbouncer on a server. +# TYPE cnpg_pgbouncer_stats_avg_server_parse_count gauge +cnpg_pgbouncer_stats_avg_server_parse_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_avg_wait_time Time spent by clients waiting for a server, in microseconds (average per second). 
# TYPE cnpg_pgbouncer_stats_avg_wait_time gauge
cnpg_pgbouncer_stats_avg_wait_time{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_avg_xact_count Average transactions per second in last stat period.
# TYPE cnpg_pgbouncer_stats_avg_xact_count gauge
-cnpg_pgbouncer_stats_avg_xact_count{database="pgbouncer"} 1
-
+cnpg_pgbouncer_stats_avg_xact_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_avg_xact_time Average transaction duration, in microseconds.
# TYPE cnpg_pgbouncer_stats_avg_xact_time gauge
cnpg_pgbouncer_stats_avg_xact_time{database="pgbouncer"} 0
-
+# HELP cnpg_pgbouncer_stats_total_bind_count Total number of prepared statements readied for execution by clients and forwarded to PostgreSQL by pgbouncer
+# TYPE cnpg_pgbouncer_stats_total_bind_count gauge
+cnpg_pgbouncer_stats_total_bind_count{database="pgbouncer"} 0
+# HELP cnpg_pgbouncer_stats_total_client_parse_count Total number of prepared statements created by clients.
+# TYPE cnpg_pgbouncer_stats_total_client_parse_count gauge
+cnpg_pgbouncer_stats_total_client_parse_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_total_query_count Total number of SQL queries pooled by pgbouncer.
# TYPE cnpg_pgbouncer_stats_total_query_count gauge
-cnpg_pgbouncer_stats_total_query_count{database="pgbouncer"} 3
-
+cnpg_pgbouncer_stats_total_query_count{database="pgbouncer"} 15
# HELP cnpg_pgbouncer_stats_total_query_time Total number of microseconds spent by pgbouncer when actively connected to PostgreSQL, executing queries.
# TYPE cnpg_pgbouncer_stats_total_query_time gauge
cnpg_pgbouncer_stats_total_query_time{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_total_received Total volume in bytes of network traffic received by pgbouncer.
# TYPE cnpg_pgbouncer_stats_total_received gauge
cnpg_pgbouncer_stats_total_received{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_total_sent Total volume in bytes of network traffic sent by pgbouncer.
# TYPE cnpg_pgbouncer_stats_total_sent gauge
cnpg_pgbouncer_stats_total_sent{database="pgbouncer"} 0
-
+# HELP cnpg_pgbouncer_stats_total_server_parse_count Total number of prepared statements created by pgbouncer on a server.
+# TYPE cnpg_pgbouncer_stats_total_server_parse_count gauge
+cnpg_pgbouncer_stats_total_server_parse_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_total_wait_time Time spent by clients waiting for a server, in microseconds.
# TYPE cnpg_pgbouncer_stats_total_wait_time gauge
cnpg_pgbouncer_stats_total_wait_time{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_total_xact_count Total number of SQL transactions pooled by pgbouncer.
# TYPE cnpg_pgbouncer_stats_total_xact_count gauge
-cnpg_pgbouncer_stats_total_xact_count{database="pgbouncer"} 3
-
+cnpg_pgbouncer_stats_total_xact_count{database="pgbouncer"} 15
# HELP cnpg_pgbouncer_stats_total_xact_time Total number of microseconds spent by pgbouncer when connected to PostgreSQL in a transaction, either idle in transaction or executing queries.
# TYPE cnpg_pgbouncer_stats_total_xact_time gauge
cnpg_pgbouncer_stats_total_xact_time{database="pgbouncer"} 0
```
+!!! Info
+    For a better understanding of the metrics, please refer to the PgBouncer documentation.
+
As for clusters, a specific pooler can be monitored using the
[Prometheus operator's](https://github.com/prometheus-operator/prometheus-operator)
resource
[PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.47.1/Documentation/api.md#podmonitor).
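+
+For example, assuming the Prometheus operator CRDs are available in the
+Kubernetes cluster, you can ask CloudNativePG to create the `PodMonitor` for a
+pooler by enabling it in the `Pooler` spec. This is a minimal sketch with
+illustrative names:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Pooler
+metadata:
+  name: pooler-example-rw
+spec:
+  cluster:
+    name: cluster-example
+  instances: 3
+  type: rw
+  pgbouncer:
+    poolMode: session
+  monitoring:
+    enablePodMonitor: true
+```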
diff --git a/docs/src/container_images.md b/docs/src/container_images.md index d198695945..208c5d2a7a 100644 --- a/docs/src/container_images.md +++ b/docs/src/container_images.md @@ -1,41 +1,40 @@ # Container Image Requirements + -The CloudNativePG operator for Kubernetes is designed to -work with any compatible container image of PostgreSQL that complies -with the following requirements: - -- PostgreSQL executables that must be in the path: - - `initdb` - - `postgres` - - `pg_ctl` - - `pg_controldata` - - `pg_basebackup` -- Barman Cloud executables that must be in the path: - - `barman-cloud-backup` - - `barman-cloud-backup-delete` - - `barman-cloud-backup-list` - - `barman-cloud-check-wal-archive` - - `barman-cloud-restore` - - `barman-cloud-wal-archive` - - `barman-cloud-wal-restore` -- PGAudit extension installed (optional - only if PGAudit is required - in the deployed clusters) -- Appropriate locale settings +The CloudNativePG operator for Kubernetes is designed to work with any +compatible PostgreSQL container image that meets the following requirements: + +- PostgreSQL executables must be available in the system path: + - `initdb` + - `postgres` + - `pg_ctl` + - `pg_controldata` + - `pg_basebackup` +- Proper locale settings configured + +Optional Components: + +- [PGAudit](https://www.pgaudit.org/) extension (only required if audit logging + is needed) +- `du` (used for `kubectl cnpg status`) !!! Important - Only [PostgreSQL versions supported by the PGDG](https://postgresql.org/) are allowed. + Only [PostgreSQL versions officially supported by PGDG](https://postgresql.org/) are allowed. + +!!! Info + Barman Cloud executables are no longer required in CloudNativePG. The + recommended approach is to use the dedicated [Barman Cloud Plugin](https://github.com/cloudnative-pg/plugin-barman-cloud). -No entry point and/or command is required in the image definition, as -CloudNativePG overrides it with its instance manager. +No entry point or command is required in the image definition. CloudNativePG +automatically overrides it with its instance manager. !!! Warning - Application Container Images will be used by CloudNativePG - in a **Primary with multiple/optional Hot Standby Servers Architecture** - only. + CloudNativePG only supports **Primary with multiple/optional Hot Standby + Servers architecture** for PostgreSQL application container images. -The CloudNativePG community provides and supports +The CloudNativePG community provides and maintains [public PostgreSQL container images](https://github.com/cloudnative-pg/postgres-containers) -that work with CloudNativePG, and publishes them on +that are fully compatible with CloudNativePG. These images are published on [ghcr.io](https://ghcr.io/cloudnative-pg/postgresql). ## Image Tag Requirements diff --git a/docs/src/controller.md b/docs/src/controller.md index cb000833cf..b90e1ae1db 100644 --- a/docs/src/controller.md +++ b/docs/src/controller.md @@ -1,4 +1,5 @@ # Custom Pod Controller + Kubernetes uses the [Controller pattern](https://kubernetes.io/docs/concepts/architecture/controller/) diff --git a/docs/src/database_import.md b/docs/src/database_import.md index f8bba32c4a..593ea430e0 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -1,4 +1,5 @@ # Importing Postgres databases + This section describes how to import one or more existing PostgreSQL databases inside a brand new CloudNativePG cluster. 
@@ -17,8 +18,8 @@ As a result, the instructions in this section are suitable for both: - importing one or more databases from an existing PostgreSQL instance, even outside Kubernetes - importing the database from any PostgreSQL version to one that is either the - same or newer, enabling *major upgrades* of PostgreSQL (e.g. from version 11.x - to version 15.x) + same or newer, enabling *major upgrades* of PostgreSQL (e.g. from version 13.x + to version 17.x) !!! Warning When performing major upgrades of PostgreSQL you are responsible for making @@ -55,14 +56,14 @@ into the destination cluster: - **monolith approach**: the destination cluster is designed to host multiple databases and different users, imported from the source cluster -The first import method is available via the `microservice` type, while the -latter by the `monolith` type. +The first import method is available via the `microservice` type, the +second via the `monolith` type. !!! Warning It is your responsibility to ensure that the destination cluster can access the source cluster with a superuser or a user having enough privileges to take a logical backup with `pg_dump`. Please refer to the - [PostgreSQL documentation on "SQL Dump"](https://www.postgresql.org/docs/current/app-pgdump.html) + [PostgreSQL documentation on `pg_dump`](https://www.postgresql.org/docs/current/app-pgdump.html) for further information. ## The `microservice` type @@ -73,7 +74,7 @@ performed in 4 steps: - `initdb` bootstrap of the new cluster - export of the selected database (in `initdb.import.databases`) using - `pg_dump -Fc` + `pg_dump -Fd` - import of the database using `pg_restore --no-acl --no-owner` into the `initdb.database` (application database) owned by the `initdb.owner` user - cleanup of the database dump file @@ -81,6 +82,10 @@ performed in 4 steps: database via the `postImportApplicationSQL` parameter - execution of `ANALYZE VERBOSE` on the imported database +In the figure below, a single PostgreSQL cluster containing *N* databases is +imported into separate CloudNativePG clusters, with each cluster using a +microservice import for one of the *N* source databases. + ![Example of microservice import type](./images/microservice-import.png) For example, the YAML below creates a new 3 instance PostgreSQL cluster (latest @@ -134,7 +139,7 @@ spec: and unsupported versions of Postgres too, giving you the chance to move your legacy data to a better system, inside Kubernetes. This is the main reason why we used 9.6 in the examples of this section. - We'd be interested to hear from you should you experience any issues in this area. + We'd be interested to hear from you, should you experience any issues in this area. There are a few things you need to be aware of when using the `microservice` type: @@ -145,7 +150,7 @@ There are a few things you need to be aware of when using the `microservice` typ `externalCluster` during the operation - Connection to the source database must be granted with the specified user that needs to run `pg_dump` and read roles information (*superuser* is OK) -- Currently, the `pg_dump -Fc` result is stored temporarily inside the `dumps` +- Currently, the `pg_dump -Fd` result is stored temporarily inside the `dumps` folder in the `PGDATA` volume, so there should be enough available space to temporarily contain the dump result on the assigned node, as well as the restored data and indexes. 
Once the import operation is completed, this @@ -153,6 +158,12 @@ There are a few things you need to be aware of when using the `microservice` typ - Only one database can be specified inside the `initdb.import.databases` array - Roles are not imported - and as such they cannot be specified inside `initdb.import.roles` +!!! Hint + The microservice approach adheres to CloudNativePG conventions and defaults + for the destination cluster. If you do not set `initdb.database` or + `initdb.owner` for the destination cluster, both parameters will default to + `app`. + ## The `monolith` type With the monolith approach, you can specify a set of roles and databases you @@ -162,7 +173,7 @@ The operation is performed in the following steps: - `initdb` bootstrap of the new cluster - export and import of the selected roles - export of the selected databases (in `initdb.import.databases`), one at a time, - using `pg_dump -Fc` + using `pg_dump -Fd` - create each of the selected databases and import data using `pg_restore` - run `ANALYZE` on each imported database - cleanup of the database dump files @@ -222,8 +233,9 @@ There are a few things you need to be aware of when using the `monolith` type: - Connection to the source database must be granted with the specified user that needs to run `pg_dump` and retrieve roles information (*superuser* is OK) -- Currently, the `pg_dump -Fc` result is stored temporarily inside the `dumps` - folder in the `PGDATA` volume, so there should be enough available space to +- Currently, the `pg_dump -Fd` result is stored temporarily inside the `dumps` + folder in the `PGDATA` volume of the destination cluster's instances, so + there should be enough available space to temporarily contain the dump result on the assigned node, as well as the restored data and indexes. Once the import operation is completed, this folder is automatically deleted by the operator. @@ -231,7 +243,7 @@ There are a few things you need to be aware of when using the `monolith` type: - Any role that is required by the imported databases must be specified inside `initdb.import.roles`, with the limitations below: - The following roles, if present, are not imported: - `postgres`, `streaming_replica`, `cnp_pooler_pgbouncer` + `postgres`, `streaming_replica`, `cnpg_pooler_pgbouncer` - The `SUPERUSER` option is removed from any imported role - Wildcard `"*"` can be used as the only element in the `databases` and/or `roles` arrays to import every object of the kind; When matching databases @@ -239,7 +251,117 @@ There are a few things you need to be aware of when using the `monolith` type: and those databases not allowing connections - After the clone procedure is done, `ANALYZE VERBOSE` is executed for every database. -- `postImportApplicationSQL` field is not supported +- The `postImportApplicationSQL` field is not supported + +!!! Hint + The databases and their owners are preserved exactly as they exist in the + source cluster—no `app` database or user will be created during import. If your + `bootstrap.initdb` stanza specifies custom `database` and `owner` values that + do not match any of the databases or users being imported, the instance + manager will create a new, empty application database and owner role with those + specified names, while leaving the imported databases and owners unchanged. + +## A practical example + +There is nothing to stop you from using the `monolith` approach to import a +single database. 
It is interesting to see how the results of doing so would +differ from using the `microservice` approach. + +Given a source cluster, for example the following, with a database named +`mydb` owned by role `me`: + +``` yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 1 + + postgresql: + pg_hba: + - host all all all trust + + storage: + size: 1Gi + + bootstrap: + initdb: + database: mydb + owner: me +``` + +We can import it via `microservice`: + +``` yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example-microservice +spec: + instances: 1 + + storage: + size: 1Gi + + bootstrap: + initdb: + import: + type: microservice + databases: + - mydb + source: + externalCluster: cluster-example + + externalClusters: + - name: cluster-example + connectionParameters: + host: cluster-example-rw + dbname: postgres +``` + +as well as via monolith: + +``` yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example-monolith +spec: + instances: 1 + + storage: + size: 1Gi + + bootstrap: + initdb: + import: + type: monolith + databases: + - mydb + roles: + - me + source: + externalCluster: cluster-example + + externalClusters: + - name: cluster-example + connectionParameters: + host: cluster-example-rw + dbname: postgres +``` + +In both cases, the database's contents will be imported, but: + +- In the microservice case, the imported database's name and owner both become + `app`, or whichever configuration for the fields `database` and `owner` are + set in the `bootstrap.initdb` stanza. +- In the monolith case, the database and owner are kept exactly as in the source + cluster, i.e. `mydb` and `me` respectively. No `app` database nor user will be + created. If there are custom settings for `database` and `owner` in the + `bootstrap.initdb` stanza that don't match the source databases/owners to + import, the instance manager will create a new empty application database and + owner role, but will leave the imported databases/owners intact. ## Import optimizations @@ -267,3 +389,51 @@ topic is beyond the scope of CloudNativePG, we recommend that you reduce unnecessary writes in the checkpoint area by tuning Postgres GUCs like `shared_buffers`, `max_wal_size`, `checkpoint_timeout` directly in the `Cluster` configuration. + +## Customizing `pg_dump` and `pg_restore` Behavior + +You can customize the behavior of `pg_dump` and `pg_restore` by specifying +additional options using the `pgDumpExtraOptions` and `pgRestoreExtraOptions` +parameters. For instance, you can enable parallel jobs to speed up data +import/export processes, as shown in the following example: + +```yaml + # + bootstrap: + initdb: + import: + type: microservice + databases: + - app + source: + externalCluster: cluster-example + pgDumpExtraOptions: + - '--jobs=2' + pgRestoreExtraOptions: + - '--jobs=2' + # +``` + +!!! Warning + Use the `pgDumpExtraOptions` and `pgRestoreExtraOptions` fields with + caution and at your own risk. These options are not validated or verified by + the operator, and some configurations may conflict with its intended + functionality or behavior. Always test thoroughly in a safe and controlled + environment before applying them in production. 
+
+## Online Import and Upgrades
+
+Logical replication offers a powerful way to import any PostgreSQL database
+accessible over the network using the following approach:
+
+- **Import Bootstrap with Schema-Only Option**: Initialize the schema in the
+  target database before replication begins.
+- **`Subscription` Resource**: Set up continuous replication to synchronize
+  data changes.
+
+This technique can also be leveraged for performing major PostgreSQL upgrades
+with minimal downtime, making it ideal for seamless migrations and system
+upgrades.
+
+For more details, including limitations and best practices, refer to the
+[Logical Replication](logical_replication.md) section in the documentation.
diff --git a/docs/src/declarative_database_management.md b/docs/src/declarative_database_management.md
index 27fc7638a2..b938a65d32 100644
--- a/docs/src/declarative_database_management.md
+++ b/docs/src/declarative_database_management.md
@@ -1,54 +1,134 @@
-# Declarative Database Management
+# PostgreSQL Database Management
+
-Declarative database management enables users to control the lifecycle of
-databases via a new Custom Resource Definition (CRD) called `Database`.
+CloudNativePG simplifies PostgreSQL database provisioning by automatically
+creating an application database named `app` by default. This default behavior
+is explained in the ["Bootstrap an Empty Cluster"](bootstrap.md#bootstrap-an-empty-cluster-initdb)
+section.
-A `Database` object is managed by the instance manager of the cluster's
-primary instance. This feature is not supported in replica clusters,
-as replica clusters lack a primary instance to manage the `Database` object.
+For more advanced use cases, CloudNativePG introduces **declarative database
+management**, which empowers users to define and control the lifecycle of
+PostgreSQL databases using the `Database` Custom Resource Definition (CRD).
+This method seamlessly integrates with Kubernetes, providing a scalable,
+automated, and consistent approach to managing PostgreSQL databases.
-### Example: Simple Database Declaration
+---
+
+## Key Concepts
+
+### Scope of Management
+
+!!! Important
+    CloudNativePG manages **global objects** in PostgreSQL clusters, including
+    databases, roles, and tablespaces. However, it does **not** manage database content
+    beyond extensions and schemas (e.g., tables). To manage database content, use specialized
+    tools or rely on the applications themselves.
+
+### Declarative `Database` Manifest
+
+The following example demonstrates how a `Database` resource interacts with a
+`Cluster`:
-Below is an example of a basic `Database` configuration:
 
 ```yaml
 apiVersion: postgresql.cnpg.io/v1
 kind: Database
 metadata:
-  name: db-one
+  name: cluster-example-one
 spec:
   name: one
   owner: app
   cluster:
     name: cluster-example
+  extensions:
+    - name: bloom
+      ensure: present
+```
+
+When applied, this manifest creates a `Database` object called
+`cluster-example-one` requesting a database named `one`, owned by the `app`
+role, in the `cluster-example` PostgreSQL cluster.
+
+!!! Info
+    Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-DatabaseSpec)
+    for the full list of attributes you can define for each `Database` object.
+
+### Required Fields in the `Database` Manifest
+
+- `metadata.name`: Unique name of the Kubernetes object within its namespace.
+- `spec.name`: Name of the database as it will appear in PostgreSQL.
+- `spec.owner`: PostgreSQL role that owns the database.
+- `spec.cluster.name`: Name of the target PostgreSQL cluster.
+
+The `Database` object must reference a specific `Cluster`, determining where
+the database will be created. It is managed by the cluster's primary instance,
+ensuring the database is created or updated as needed.
+
+!!! Info
+    The distinction between `metadata.name` and `spec.name` allows multiple
+    `Database` resources to reference databases with the same name across different
+    CloudNativePG clusters in the same Kubernetes namespace.
+
+## Reserved Database Names
+
+PostgreSQL automatically creates databases such as `postgres`, `template0`, and
+`template1`. These names are reserved and cannot be used for new `Database`
+objects in CloudNativePG.
+
+!!! Important
+    Creating a `Database` with `spec.name` set to `postgres`, `template0`, or
+    `template1` is not allowed.
+
+## Reconciliation and Status
+
+Once a `Database` object is reconciled successfully:
+
+- `status.applied` will be set to `true`.
+- `status.observedGeneration` will match the `metadata.generation` of the last
+  applied configuration.
+
+Example of a reconciled `Database` object:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Database
+metadata:
+  generation: 1
+  name: cluster-example-one
+spec:
+  cluster:
+    name: cluster-example
+  name: one
+  owner: app
+status:
+  observedGeneration: 1
+  applied: true
```
-Once the reconciliation cycle is completed successfully, the `Database`
-status will show a `ready` field set to `true` and an empty `error` field.
+If an error occurs during reconciliation, `status.applied` will be `false`, and
+an error message will be included in the `status.message` field.
+
+## Deleting a Database
-### Database Deletion and Reclaim Policies
+CloudNativePG supports two methods for database deletion:
-A finalizer named `cnpg.io/deleteDatabase` is automatically added
-to each `Database` object to control its deletion process.
+1. Using the `delete` reclaim policy
+2. Declaratively setting the database's `ensure` field to `absent`
-By default, the `databaseReclaimPolicy` is set to `retain`, which means
-that if the `Database` object is deleted, the actual PostgreSQL database
-is retained for manual management by an administrator.
+### Deleting via `delete` Reclaim Policy
-Alternatively, if the `databaseReclaimPolicy` is set to `delete`,
-the PostgreSQL database will be automatically deleted when the `Database`
-object is removed.
+The `databaseReclaimPolicy` field determines the behavior when a `Database`
+object is deleted:
-### Example: Database with Delete Reclaim Policy
+- `retain` (default): The database remains in PostgreSQL for manual management.
+- `delete`: The database is automatically removed from PostgreSQL.
-The following example illustrates a `Database` object with a `delete`
-reclaim policy:
+Example:
 
 ```yaml
 apiVersion: postgresql.cnpg.io/v1
 kind: Database
 metadata:
-  name: db-one-with-delete-reclaim-policy
+  name: cluster-example-two
 spec:
   databaseReclaimPolicy: delete
   name: two
@@ -57,4 +137,248 @@ spec:
     name: cluster-example
 ```
 
-In this case, when the `Database` object is deleted, the corresponding PostgreSQL database will also be removed automatically.
+Deleting this `Database` object will automatically remove the `two` database
+from the `cluster-example` cluster.
+
+### Declaratively Setting `ensure: absent`
+
+To remove a database, set the `ensure` field to `absent`, as in the following
+example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Database
+metadata:
+  name: cluster-example-database-to-drop
+spec:
+  cluster:
+    name: cluster-example
+  name: database-to-drop
+  owner: app
+  ensure: absent
+```
+
+This manifest ensures that the `database-to-drop` database is removed from the
+`cluster-example` cluster.
+
+## Managing Extensions in a Database
+
+!!! Info
+    While extensions are database-scoped rather than global objects,
+    CloudNativePG provides a declarative interface for managing them. This approach
+    is necessary because installing certain extensions may require superuser
+    privileges, which CloudNativePG recommends disabling by default. By leveraging
+    this API, users can efficiently manage extensions in a scalable and controlled
+    manner without requiring elevated privileges.
+
+CloudNativePG simplifies and automates the management of PostgreSQL extensions
+within the target database.
+
+To enable this feature, define the `spec.extensions` field
+with a list of extension specifications, as shown in the following example:
+
+```yaml
+# ...
+spec:
+  extensions:
+    - name: bloom
+      ensure: present
+# ...
+```
+
+Each extension entry supports the following properties:
+
+- `name` *(mandatory)*: The name of the extension.
+- `ensure`: Specifies whether the extension should be present or absent in the
+  database:
+    - `present`: Ensures that the extension is installed (default).
+    - `absent`: Ensures that the extension is removed.
+- `version`: The specific version of the extension to install or
+  upgrade to.
+- `schema`: The schema in which the extension should be installed.
+
+!!! Info
+    CloudNativePG manages extensions using the following PostgreSQL SQL commands:
+    [`CREATE EXTENSION`](https://www.postgresql.org/docs/current/sql-createextension.html),
+    [`DROP EXTENSION`](https://www.postgresql.org/docs/current/sql-dropextension.html),
+    [`ALTER EXTENSION`](https://www.postgresql.org/docs/current/sql-alterextension.html)
+    (limited to `UPDATE TO` and `SET SCHEMA`).
+
+The operator reconciles only the extensions explicitly listed in
+`spec.extensions`. Any existing extensions not specified in this list remain
+unchanged.
+
+!!! Warning
+    Before the introduction of declarative extension management, CloudNativePG
+    did not offer a straightforward way to create extensions through configuration.
+    To address this, the ["managed extensions"](postgresql_conf.md#managed-extensions)
+    feature was introduced, enabling the automated and transparent management
+    of key extensions like `pg_stat_statements`. Currently, it is your
+    responsibility to ensure there are no conflicts between extension support in
+    the `Database` CRD and the managed extensions feature.
+
+## Managing Schemas in a Database
+
+!!! Info
+    Schema management in PostgreSQL is an exception to CloudNativePG's primary
+    focus on managing global objects. Since schemas exist within a database, they
+    are typically managed as part of the application development process. However,
+    CloudNativePG provides a declarative interface for schema management, primarily
+    to complete the support for deploying extensions within schemas.
+
+CloudNativePG simplifies and automates the management of PostgreSQL schemas
+within the target database.
+
+To enable this feature, define the `spec.schemas` field
+with a list of schema specifications, as shown in the following example:
+
+```yaml
+# ...
+spec:
+  schemas:
+    - name: app
+      owner: app
+# ...
+```
+
+Each schema entry supports the following properties:
+
+- `name` *(mandatory)*: The name of the schema.
+- `owner`: The owner of the schema.
+- `ensure`: Specifies whether the schema should be present or absent in the
+  database:
+    - `present`: Ensures that the schema is installed (default).
+    - `absent`: Ensures that the schema is removed.
+
+!!! Info
+    CloudNativePG manages schemas using the following PostgreSQL SQL commands:
+    [`CREATE SCHEMA`](https://www.postgresql.org/docs/current/sql-createschema.html),
+    [`DROP SCHEMA`](https://www.postgresql.org/docs/current/sql-dropschema.html),
+    [`ALTER SCHEMA`](https://www.postgresql.org/docs/current/sql-alterschema.html).
+
+## Managing Foreign Data Wrappers (FDWs) in a Database
+
+!!! Info
+    Foreign Data Wrappers (FDWs) are database-scoped objects that typically
+    require superuser privileges to create or modify. CloudNativePG provides a
+    declarative API for managing FDWs, enabling users to define and maintain them
+    in a controlled, Kubernetes-native way without directly executing SQL commands
+    or escalating privileges.
+
+CloudNativePG enables seamless and automated management of PostgreSQL foreign
+data wrappers in the target database using declarative configuration.
+
+To enable this feature, define the `spec.fdws` field with a list of FDW
+specifications, as shown in the following example:
+
+```yaml
+# ...
+spec:
+  fdws:
+    - name: postgres_fdw
+      usage:
+        - name: app
+          type: grant
+# ...
+```
+
+Each FDW entry supports the following properties:
+
+- `name`: The name of the foreign data wrapper **(mandatory)**.
+- `ensure`: Indicates whether the FDW should be `present` or `absent` in the
+  database (default is `present`).
+- `handler`: The name of the handler function used by the FDW. If not
+  specified, the default handler defined by the FDW extension (if any) will be
+  used.
+- `validator`: The name of the validator function used by the FDW. If not
+  specified, the default validator defined by the FDW extension (if any) will
+  be used.
+- `owner`: The owner of the FDW **(must be a superuser)**.
+- `usage`: The list of `USAGE` permissions of the FDW, with the following fields:
+    - `name`: The name of the role to which the usage permission should be
+      granted or from which it should be revoked.
+    - `type`: The type of the usage permission. Supports `grant` and `revoke`.
+- `options`: A map of FDW-specific options to manage, where each key is the
+  name of an option. Each option supports the following fields:
+    - `value`: The string value of the option.
+    - `ensure`: Indicates whether the option should be `present` or `absent`.
+
+!!! Info
+    Both `handler` and `validator` are optional. Setting either to `"-"`
+    removes the handler or validator from the FDW, respectively, following the
+    PostgreSQL convention where `"-"` denotes the absence of a handler or
+    validator.
+
+!!! Warning
+    PostgreSQL restricts ownership of foreign data wrappers to **roles with
+    superuser privileges only**. Attempting to assign ownership to a non-superuser
+    (e.g., an app role) will be ignored or rejected, as PostgreSQL does not allow
+    non-superuser ownership of foreign data wrappers.
+
+The operator reconciles only the FDWs explicitly listed in `spec.fdws`. Any
+existing FDWs not declared in this list are left untouched.
+
+!!!
Info + CloudNativePG manages FDWs using PostgreSQL's native SQL commands: + [`CREATE FOREIGN DATA WRAPPER`](https://www.postgresql.org/docs/current/sql-createforeigndatawrapper.html), + [`ALTER FOREIGN DATA WRAPPER`](https://www.postgresql.org/docs/current/sql-alterforeigndatawrapper.html), + and [`DROP FOREIGN DATA WRAPPER`](https://www.postgresql.org/docs/current/sql-dropforeigndatawrapper.html). + The `ALTER` command supports option updates. + +## Limitations and Caveats + +### Renaming a database + +While CloudNativePG adheres to PostgreSQL’s +[CREATE DATABASE](https://www.postgresql.org/docs/current/sql-createdatabase.html) and +[ALTER DATABASE](https://www.postgresql.org/docs/current/sql-alterdatabase.html) +commands, **renaming databases is not supported**. +Attempting to modify `spec.name` in an existing `Database` object will result +in rejection by Kubernetes. + +### Creating vs. Altering a Database + +- For new databases, CloudNativePG uses the `CREATE DATABASE` statement. +- For existing databases, `ALTER DATABASE` is used to apply changes. + +It is important to note that there are some differences between these two +Postgres commands: in particular, the options accepted by `ALTER` are a subset +of those accepted by `CREATE`. + +!!! Warning + Some fields, such as encoding and collation settings, are immutable in + PostgreSQL. Attempts to modify these fields on existing databases will be + ignored. + +### Replica Clusters + +Database objects declared on replica clusters cannot be enforced, as replicas +lack write privileges. These objects will remain in a pending state until the +replica is promoted. + +### Conflict Resolution + +If two `Database` objects in the same namespace manage the same PostgreSQL +database (i.e., identical `spec.name` and `spec.cluster.name`), the second +object will be rejected. + +Example status message: + +```yaml +status: + applied: false + message: 'reconciliation error: database "one" is already managed by Database object "cluster-example-one"' +``` + +### Postgres Version Differences + +CloudNativePG adheres to PostgreSQL's capabilities. For example, features like +`ICU_RULES` introduced in PostgreSQL 16 are unavailable in earlier versions. +Errors from PostgreSQL will be reflected in the `Database` object's `status`. + +### Manual Changes + +CloudNativePG does not overwrite manual changes to databases. Once reconciled, +a `Database` object will not be reapplied unless its `metadata.generation` +changes, giving flexibility for direct PostgreSQL modifications. diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md index 84ec83844c..d84a5d5456 100644 --- a/docs/src/declarative_hibernation.md +++ b/docs/src/declarative_hibernation.md @@ -1,4 +1,5 @@ # Declarative hibernation + CloudNativePG is designed to keep PostgreSQL clusters up, running and available anytime. @@ -12,14 +13,6 @@ process is running. The declarative hibernation feature enables saving CPU power by removing the database Pods, while keeping the database PVCs. -!!! Note - Declarative hibernation is different from the existing implementation - of [imperative hibernation via the `cnpg` plugin](kubectl-plugin.md#cluster-hibernation). - Imperative hibernation shuts down all Postgres instances in the High - Availability cluster, and keeps a static copy of the PVCs of the primary that - contain `PGDATA` and WALs. The plugin enables to exit the hibernation phase, by - resuming the primary and then recreating all the replicas - if they exist. 
- ## Hibernation To hibernate a cluster, set the `cnpg.io/hibernation=on` annotation: @@ -58,7 +51,7 @@ $ kubectl cnpg status Cluster Summary Name: cluster-example Namespace: default -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.5 Primary instance: cluster-example-2 Status: Cluster in healthy state Instances: 3 diff --git a/docs/src/declarative_role_management.md b/docs/src/declarative_role_management.md index 04b328c977..5c177ca9a7 100644 --- a/docs/src/declarative_role_management.md +++ b/docs/src/declarative_role_management.md @@ -1,4 +1,5 @@ -# Database Role Management +# PostgreSQL Role Management + From its inception, CloudNativePG has managed the creation of specific roles required in PostgreSQL instances: diff --git a/docs/src/e2e.md b/docs/src/e2e.md index 416df25e0d..b2410fe456 100644 --- a/docs/src/e2e.md +++ b/docs/src/e2e.md @@ -1,11 +1,12 @@ # End-to-End Tests + CloudNativePG is automatically tested after each commit via a suite of **End-to-end (E2E) tests** (or integration tests) which ensure that the operator correctly deploys and manages PostgreSQL clusters. -Kubernetes versions 1.25 through 1.29, and PostgreSQL versions 12 through 16, +Kubernetes versions 1.27 through 1.32, and PostgreSQL versions 13 through 17, are tested for each commit, helping detect bugs at an early stage of the development process. @@ -60,6 +61,7 @@ and the following suite of E2E tests are performed on that cluster: * Replication Slots * Synchronous replication * Scale-up and scale-down of a Cluster + * Logical replication via declarative Publication / Subscription * **Replica clusters** * Bootstrapping a replica cluster from backup @@ -127,3 +129,10 @@ and the following suite of E2E tests are performed on that cluster: * Declarative creation of temporary tablespaces * Backup / recovery from object storage * Backup / recovery from volume snapshots + +* **Declarative databases** + * Declarative creation of databases with default (retain) reclaim policy + * Declarative creation of databases with delete reclaim policy + +* **Major version upgrade** + * Upgrade to the latest major version diff --git a/docs/src/failover.md b/docs/src/failover.md index 8469747104..ab78aa70ab 100644 --- a/docs/src/failover.md +++ b/docs/src/failover.md @@ -1,4 +1,5 @@ # Automated failover + In the case of unexpected errors on the primary for longer than the `.spec.failoverDelay` (by default `0` seconds), the cluster will go into @@ -46,7 +47,8 @@ During the time the failing primary is being shut down: ## RTO and RPO impact -Failover may result in the service being impacted and/or data being lost: +Failover may result in the service being impacted ([RTO](before_you_start.md#rto)) +and/or data being lost ([RPO](before_you_start.md#rpo)): 1. During the time when the primary has started to fail, and before the controller starts failover procedures, queries in transit, WAL writes, checkpoints and @@ -90,3 +92,262 @@ expected outage. Enabling a new configuration option to delay failover provides a mechanism to prevent premature failover for short-lived network or node instability. + +## Failover Quorum (Quorum-based Failover) + +!!! Warning + *Failover quorum* is an experimental feature introduced in version 1.27.0. + Use with caution in production environments. + +Failover quorum is a mechanism that enhances data durability and safety during +failover events in CloudNativePG-managed PostgreSQL clusters. 
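+
+The feature is opt-in: as explained below, it is enabled with an experimental
+annotation on the `Cluster` resource. A minimal sketch (the cluster definition
+is illustrative):
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+  annotations:
+    alpha.cnpg.io/failoverQuorum: "true"
+spec:
+  instances: 3
+# ...
+```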
+
+Quorum-based failover allows the controller to determine whether to promote a replica
+to primary based on the state of a quorum of replicas.
+This is useful when stronger data durability is required than that offered
+by [synchronous replication](replication.md#synchronous-replication) and
+default automated failover procedures.
+
+When synchronous replication is not enabled, some data loss is expected and
+accepted during failover, as a replica may lag behind the primary when
+promoted.
+
+With synchronous replication enabled, the guarantee is that the application
+will not receive explicit acknowledgment of the successful commit of a
+transaction until the WAL data is known to be safely received by all required
+synchronous standbys.
+However, this alone is not enough to guarantee that the operator can promote
+the most advanced replica.
+
+For example, in a three-node cluster with synchronous replication set to `ANY 1
+(...)`, data is written to the primary and one standby before a commit is
+acknowledged. If both the primary and the aligned standby become unavailable
+(such as during a network partition), the remaining replica may not have the
+latest data. Promoting it could lose some data that the application considered
+committed.
+
+Quorum-based failover addresses this risk by ensuring that failover only occurs
+if the operator can confirm the presence of all synchronously committed data in
+the instance to promote, and it does not occur otherwise.
+
+This feature allows users to choose their preferred trade-off between data
+durability and data availability.
+
+Failover quorum can be enabled by setting the annotation
+`alpha.cnpg.io/failoverQuorum="true"` in the `Cluster` resource.
+
+!!! info
+    When this feature is out of the experimental phase, the annotation
+    `alpha.cnpg.io/failoverQuorum` will be replaced by a configuration option in
+    the `Cluster` resource.
+
+### How it works
+
+Before promoting a replica to primary, the operator performs a quorum check,
+following the principles of the Dynamo `R + W > N` consistency model[^1].
+
+In quorum failover, these values have the following meaning:
+
+- `R` is the number of *promotable replicas* (read quorum);
+- `W` is the number of replicas that must acknowledge the write before the
+  `COMMIT` is returned to the client (write quorum);
+- `N` is the total number of potentially synchronous replicas.
+
+*Promotable replicas* are replicas that satisfy all of the following properties:
+
+  - are part of the cluster;
+  - are able to report their state to the operator;
+  - are potentially synchronous.
+
+If `R + W > N`, then we can be sure that among the promotable replicas there is
+at least one that has confirmed all the synchronous commits, and we can safely
+promote it to primary. If this is not the case, the controller will not promote
+any replica to primary, and will wait for the situation to change.
+
+Users can force the promotion of a replica to primary through the
+`kubectl cnpg promote` command, even if the quorum check fails.
+
+!!! Warning
+    Manual promotion should only be used as a last resort. Before proceeding,
+    make sure you fully understand the risk of data loss and carefully consider the
+    consequences of prioritizing the resumption of write workloads for your
+    applications.
+
+An additional CRD is used to track the quorum state of the cluster. A `Cluster`
+with quorum failover enabled will have a `FailoverQuorum` resource with the same
+name as the `Cluster` resource.
The `FailoverQuorum` CR is created by the
+controller when quorum failover is enabled; it is updated by the primary
+instance during its reconciliation loop and read by the operator during quorum
+checks. It is used to track the latest known configuration of the synchronous
+replication.
+
+!!! Important
+    Users should not modify the `FailoverQuorum` resource directly. During
+    PostgreSQL configuration changes, when it is not possible to determine the
+    configuration, the `FailoverQuorum` resource will be reset, preventing any
+    failover until the new configuration is applied.
+
+The `FailoverQuorum` resource works in conjunction with PostgreSQL synchronous
+replication.
+
+!!! Warning
+    There is no guarantee that `COMMIT` operations acknowledged to the client
+    without synchronous replication, such as those issued after explicitly
+    disabling it with `SET synchronous_commit TO local`, will be present on a
+    promoted replica.
+
+### Quorum Failover Example Scenarios
+
+In the following scenarios, `R` is the number of promotable replicas, `W` is
+the number of replicas that must acknowledge a write before commit, and `N` is
+the total number of potentially synchronous replicas. The "Failover" column
+indicates whether failover is allowed under quorum failover rules.
+
+#### Scenario 1: Three-node cluster, failing pod(s)
+
+Consider a cluster with `instances: 3`, `synchronous.number: 1`, and
+`dataDurability: required`.
+
+- If only the primary fails, two promotable replicas remain (R=2).
+  Since `R + W > N` (2 + 1 > 2), failover is allowed and safe.
+- If both the primary and one replica fail, only one promotable replica
+  remains (R=1). Since `R + W = N` (1 + 1 = 2), failover is not allowed to
+  prevent possible data loss.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 2 | 1 | 2 | ✅ |
+| 1 | 1 | 2 | ❌ |
+
+#### Scenario 2: Three-node cluster, network partition
+
+A cluster with `instances: 3`, `synchronous.number: 1`, and
+`dataDurability: required` experiences a network partition.
+
+- If the operator can communicate with the primary, no failover occurs. The
+  cluster can be impacted if the primary cannot reach any standby, since it
+  won't commit transactions due to synchronous replication requirements.
+- If the operator cannot reach the primary but can reach both replicas (R=2),
+  failover is allowed. If the operator can reach only one replica (R=1),
+  failover is not allowed, as the reachable replica may not be the one holding
+  the synchronous writes.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 2 | 1 | 2 | ✅ |
+| 1 | 1 | 2 | ❌ |
+
+#### Scenario 3: Five-node cluster, network partition
+
+A cluster with `instances: 5`, `synchronous.number: 2`, and
+`dataDurability: required` experiences a network partition.
+
+- If the operator can communicate with the primary, no failover occurs. The
+  cluster can be impacted if the primary cannot reach at least two standbys,
+  since it won't commit transactions due to synchronous replication
+  requirements.
+- If the operator cannot reach the primary but can reach at least three
+  replicas (R=3), failover is allowed. If the operator can reach only two
+  replicas (R=2), failover is not allowed, as the replicas holding the
+  synchronous writes may be among the unreachable ones.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 3 | 2 | 4 | ✅ |
+| 2 | 2 | 4 | ❌ |
+
+#### Scenario 4: Three-node cluster with remote synchronous replicas
+
+A cluster with `instances: 3` and remote synchronous replicas defined in
+`standbyNamesPre` or `standbyNamesPost`.
We assume that the primary is failing.
+
+This scenario requires an important consideration. Replicas listed in
+`standbyNamesPre` or `standbyNamesPost` are not counted in
+`R` (they cannot be promoted), but are included in `N` (they may have received
+synchronous writes). So, if
+`synchronous.number <= len(standbyNamesPre) + len(standbyNamesPost)`, failover
+is not possible, as no local replica can be guaranteed to have the required
+data. The operator prevents such configurations during validation, but some
+invalid configurations are shown below for clarity.
+
+**Example configurations:**
+
+Configuration #1 (valid):
+```yaml
+instances: 3
+postgresql:
+  synchronous:
+    method: any
+    number: 2
+    standbyNamesPre:
+      - angus
+```
+In this configuration, when the primary fails, `R = 2` (the local replicas),
+`W = 2`, and `N = 3` (2 local replicas + 1 remote), allowing failover.
+If an additional replica fails (`R = 1`), failover is not allowed.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 2 | 2 | 3 | ✅ |
+| 1 | 2 | 3 | ❌ |
+
+Configuration #2 (invalid):
+```yaml
+instances: 3
+postgresql:
+  synchronous:
+    method: any
+    number: 1
+    maxStandbyNamesFromCluster: 1
+    standbyNamesPre:
+      - angus
+```
+In this configuration, `R = 1` (only one local replica can be part of the
+synchronous set, due to `maxStandbyNamesFromCluster: 1`), `W = 1`, and `N = 2`
+(1 local replica + 1 remote).
+Failover is not possible in this setup, so quorum failover cannot be
+enabled with this configuration.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 1 | 1 | 2 | ❌ |
+
+Configuration #3 (invalid):
+```yaml
+instances: 3
+postgresql:
+  synchronous:
+    method: any
+    number: 1
+    maxStandbyNamesFromCluster: 0
+    standbyNamesPre:
+      - angus
+      - malcolm
+```
+In this configuration, `R = 0` (no local replica is part of the synchronous
+set), `W = 1`, and `N = 2` (0 local replicas + 2 remote).
+Failover is not possible in this setup, so quorum failover cannot be
+enabled with this configuration.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 0 | 1 | 2 | ❌ |
+
+#### Scenario 5: Three-node cluster, preferred data durability, network partition
+
+Consider a cluster with `instances: 3`, `synchronous.number: 1`, and
+`dataDurability: preferred` that experiences a network partition.
+
+- If the operator can communicate with both the primary and the API server,
+  the primary continues to operate, removing unreachable standbys from the
+  `synchronous_standby_names` set.
+- If the primary cannot reach the operator or API server, a quorum check is
+  performed. The `FailoverQuorum` status cannot have changed, as the primary
+  cannot have received a new configuration. If the operator can reach both
+  replicas, failover is allowed (`R=2`). If only one replica is reachable
+  (`R=1`), failover is not allowed.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 2 | 1 | 2 | ✅ |
+| 1 | 1 | 2 | ❌ |
+
+[^1]: [Dynamo: Amazon’s highly available key-value store](https://www.amazon.science/publications/dynamo-amazons-highly-available-key-value-store)
diff --git a/docs/src/failure_modes.md b/docs/src/failure_modes.md
index 3f9b746f3c..c26d180235 100644
--- a/docs/src/failure_modes.md
+++ b/docs/src/failure_modes.md
@@ -1,185 +1,60 @@
 # Failure Modes
-
-This section provides an overview of the major failure scenarios that
-PostgreSQL can face on a Kubernetes cluster during its lifetime.
-
-!!! Important
-    In case the failure scenario you are experiencing is not covered by this
-    section, please immediately seek for [professional support](https://cloudnative-pg.io/support/).
-
-!!!
Seealso "Postgres instance manager" - Please refer to the ["Postgres instance manager" section](instance_manager.md) - for more information the liveness and readiness probes implemented by - CloudNativePG. - -## Storage space usage - -The operator will instantiate one PVC for every PostgreSQL instance to store the `PGDATA` content. -A second PVC dedicated to the WAL storage will be provisioned in case `.spec.walStorage` is -specified during cluster initialization. - -Such storage space is set for reuse in two cases: - -- when the corresponding Pod is deleted by the user (and a new Pod will be recreated) -- when the corresponding Pod is evicted and scheduled on another node - -If you want to prevent the operator from reusing a certain PVC you need to -remove the PVC before deleting the Pod. For this purpose, you can use the -following command: - -```sh -kubectl delete -n [namespace] pvc/[cluster-name]-[serial] pod/[cluster-name]-[serial] -``` + !!! Note - If you specified a dedicated WAL volume, it will also have to be deleted during this process. - -```sh -kubectl delete -n [namespace] pvc/[cluster-name]-[serial] pvc/[cluster-name]-[serial]-wal pod/[cluster-name]-[serial] -``` - -For example: - -```sh -$ kubectl delete -n default pvc/cluster-example-1 pvc/cluster-example-1-wal pod/cluster-example-1 -persistentvolumeclaim "cluster-example-1" deleted -persistentvolumeclaim "cluster-example-1-wal" deleted -pod "cluster-example-1" deleted -``` - -## Failure modes - -A pod belonging to a `Cluster` can fail in the following ways: - -* the pod is explicitly deleted by the user; -* the readiness probe on its `postgres` container fails; -* the liveness probe on its `postgres` container fails; -* the Kubernetes worker node is drained; -* the Kubernetes worker node where the pod is scheduled fails. - -Each one of these failures has different effects on the `Cluster` and the -services managed by the operator. - -### Pod deleted by the user - -The operator is notified of the deletion. A new pod belonging to the -`Cluster` will be automatically created reusing the existing PVC, if available, -or starting from a physical backup of the *primary* otherwise. + In previous versions of CloudNativePG, this page included specific failure + scenarios. Since these largely follow standard Kubernetes behavior, we have + streamlined the content to avoid duplication of information that belongs to the + underlying Kubernetes stack and is not specific to CloudNativePG. + +CloudNativePG adheres to standard Kubernetes principles for self-healing and +high availability. We assume familiarity with core Kubernetes concepts such as +storage classes, PVCs, nodes, and Pods. For CloudNativePG-specific details, +refer to the ["Postgres Instance Manager" section](instance_manager.md), which +covers startup, liveness, and readiness probes, as well as the +[self-healing](#self-healing) section below. !!! Important - In case of deliberate deletion of a pod, `PodDisruptionBudget` policies - will not be enforced. + If you are running CloudNativePG in production, we strongly recommend + seeking [professional support](https://cloudnative-pg.io/support/). -Self-healing will happen as soon as the *apiserver* is notified. 
+## Self-Healing -You can trigger a sudden failure on a given pod of the cluster using the -following generic command: +### Primary Failure -```sh -kubectl delete -n [namespace] \ - pod/[cluster-name]-[serial] --grace-period=1 -``` +If the primary Pod fails: -For example, if you want to simulate a real failure on the primary and trigger -the failover process, you can run: +- The operator promotes the most up-to-date standby with the lowest replication + lag. +- The `-rw` service is updated to point to the new primary. +- The failed Pod is removed from the `-r` and `-rw` services. +- Standby Pods begin replicating from the new primary. +- The former primary uses `pg_rewind` to re-synchronize if its PVC is available; + otherwise, a new standby is created from a backup of the new primary. -```sh -kubectl delete pod [primary pod] --grace-period=1 -``` +### Standby Failure -!!! Warning - Never use `--grace-period=0` in your failover simulation tests, as this - might produce misleading results with your PostgreSQL cluster. A grace - period of 0 guarantees that the pod is immediately removed from the - Kubernetes API server, without first ensuring that the PID 1 process of - the `postgres` container (the instance manager) is shut down - contrary - to what would happen in case of a real failure (e.g. unplug the power cord - cable or network partitioning). - As a result, the operator doesn't see the pod of the primary anymore, and - triggers a failover promoting the most aligned standby, without - the guarantee that the primary had been shut down. - - -### Readiness probe failure - -After 3 failures, the pod will be considered *not ready*. The pod will still -be part of the `Cluster`, no new pod will be created. - -If the cause of the failure can't be fixed, it is possible to delete the pod -manually. Otherwise, the pod will resume the previous role when the failure -is solved. - -Self-healing will happen after three failures of the probe. +If a standby Pod fails: -### Liveness probe failure +- It is removed from the `-r` and `-ro` services. +- The Pod is restarted using its PVC if available; otherwise, a new Pod is + created from a backup of the current primary. +- Once ready, the Pod is re-added to the `-r` and `-ro` services. -After 3 failures, the `postgres` container will be considered failed. The -pod will still be part of the `Cluster`, and the *kubelet* will try to restart -the container. If the cause of the failure can't be fixed, it is possible -to delete the pod manually. - -Self-healing will happen after three failures of the probe. - -### Worker node drained - -The pod will be evicted from the worker node and removed from the service. A -new pod will be created on a different worker node from a physical backup of the -*primary* if the `reusePVC` option of the `nodeMaintenanceWindow` parameter -is set to `off` (default: `on` during maintenance windows, `off` otherwise). - -The `PodDisruptionBudget` may prevent the pod from being evicted if there -is at least another pod that is not ready. - -!!! Note - Single instance clusters prevent node drain when `reusePVC` is - set to `false`. Refer to the [Kubernetes Upgrade section](kubernetes_upgrade.md). +## Manual Intervention -Self-healing will happen as soon as the *apiserver* is notified. - -### Worker node failure - -Since the node is failed, the *kubelet* won't execute the liveness and -the readiness probes. 
The pod will be marked for deletion after the -toleration seconds configured by the Kubernetes cluster administrator for -that specific failure cause. Based on how the Kubernetes cluster is configured, -the pod might be removed from the service earlier. - -A new pod will be created on a different worker node from a physical backup -of the *primary*. The default value for that parameter in a Kubernetes -cluster is 5 minutes. - -Self-healing will happen after `tolerationSeconds`. - -## Self-healing - -If the failed pod is a standby, the pod is removed from the `-r` service -and from the `-ro` service. -The pod is then restarted using its PVC if available; otherwise, a new -pod will be created from a backup of the current primary. The pod -will be added again to the `-r` service and to the `-ro` service when ready. - -If the failed pod is the primary, the operator will promote the active pod -with status ready and the lowest replication lag, then point the `-rw` service -to it. The failed pod will be removed from the `-r` service and from the -`-rw` service. -Other standbys will start replicating from the new primary. The former -primary will use `pg_rewind` to synchronize itself with the new one if its -PVC is available; otherwise, a new standby will be created from a backup of the -current primary. - -## Manual intervention - -In the case of undocumented failure, it might be necessary to intervene -to solve the problem manually. +For failure scenarios not covered by automated recovery, manual intervention +may be required. !!! Important - In such cases, please do not perform any manual operation without - [professional support](https://cloudnative-pg.io/support/). + Do not perform manual operations without [professional support](https://cloudnative-pg.io/support/). -You can use the `cnpg.io/reconciliationLoop` annotation to temporarily disable -the reconciliation loop for a specific PostgreSQL cluster, as shown below: +### Disabling Reconciliation -``` yaml +To temporarily disable the reconciliation loop for a PostgreSQL cluster, use +the `cnpg.io/reconciliationLoop` annotation: + +```yaml metadata: name: cluster-example-no-reconcile annotations: @@ -188,12 +63,10 @@ spec: # ... ``` -The `cnpg.io/reconciliationLoop` must be used with extreme care -and for the sole duration of the extraordinary/emergency operation. +Use this annotation **with extreme caution** and only during emergency +operations. !!! Warning - Please make sure that you use this annotation only for a limited period of - time and you remove it when the emergency has finished. Leaving this annotation - in a cluster will prevent the operator from issuing any self-healing operation, - such as a failover. - + This annotation should be removed as soon as the issue is resolved. Leaving + it in place prevents the operator from executing self-healing actions, + including failover. diff --git a/docs/src/faq.md b/docs/src/faq.md index 19137d47c6..b80134c895 100644 --- a/docs/src/faq.md +++ b/docs/src/faq.md @@ -1,4 +1,5 @@ # Frequently Asked Questions (FAQ) + ## Running PostgreSQL in Kubernetes @@ -451,8 +452,8 @@ single cluster, namely: - storage: use dedicated storage for each worker node running Postgres Use at least one standby, preferably at least two, so that you can configure -synchronous replication in the cluster, introducing RPO=0 for high -availability. +synchronous replication in the cluster, introducing [RPO](before_you_start.md#rpo)=0 +for high availability. 
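+
+For example, a minimal sketch of quorum-based
+[synchronous replication](replication.md#synchronous-replication), using the
+`postgresql.synchronous` stanza (values are illustrative):
+
+```yaml
+spec:
+  instances: 3
+  postgresql:
+    synchronous:
+      method: any
+      number: 1
+```
+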
If you do not have availability zones - normally the case of on-premise installations - separate on worker nodes and storage. diff --git a/docs/src/fencing.md b/docs/src/fencing.md index 70617cee7d..25e21b1462 100644 --- a/docs/src/fencing.md +++ b/docs/src/fencing.md @@ -1,4 +1,5 @@ # Fencing + Fencing in CloudNativePG is the ultimate process of protecting the data in one, more, or even all instances of a PostgreSQL cluster when they diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index f455fd79ba..02ce6da8c1 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -1,4 +1,5 @@ # Image Catalog + `ImageCatalog` and `ClusterImageCatalog` are essential resources that empower you to define images for creating a `Cluster`. @@ -32,7 +33,9 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 - image: ghcr.io/cloudnative-pg/postgresql:16.4 + image: ghcr.io/cloudnative-pg/postgresql:16.8 + - major: 17 + image: ghcr.io/cloudnative-pg/postgresql:17.5 ``` **Example of a Cluster-Wide Catalog using `ClusterImageCatalog` Resource:** @@ -47,7 +50,9 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 - image: ghcr.io/cloudnative-pg/postgresql:16.4 + image: ghcr.io/cloudnative-pg/postgresql:16.8 + - major: 17 + image: ghcr.io/cloudnative-pg/postgresql:17.5 ``` A `Cluster` resource has the flexibility to reference either an `ImageCatalog` @@ -85,13 +90,13 @@ specified major release. ### PostgreSQL Container Images You can install the -[latest version of the cluster catalog for the PostgreSQL Container Images](https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog.yaml) +[latest version of the cluster catalog for the PostgreSQL Container Images](https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog-bookworm.yaml) ([cloudnative-pg/postgres-containers](https://github.com/cloudnative-pg/postgres-containers) repository) with: ```shell kubectl apply \ - -f https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog.yaml + -f https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog-bookworm.yaml ``` ### PostGIS Container Images diff --git a/docs/src/imagevolume_extensions.md b/docs/src/imagevolume_extensions.md new file mode 100644 index 0000000000..4b9fba6383 --- /dev/null +++ b/docs/src/imagevolume_extensions.md @@ -0,0 +1,350 @@ +# Image Volume Extensions + + +CloudNativePG supports the **dynamic loading of PostgreSQL extensions** into a +`Cluster` at Pod startup using the [Kubernetes `ImageVolume` feature](https://kubernetes.io/docs/tasks/configure-pod-container/image-volumes/) +and the `extension_control_path` GUC introduced in PostgreSQL 18, to which this +project contributed. + +This feature allows you to mount a [PostgreSQL extension](https://www.postgresql.org/docs/current/extend-extensions.html), +packaged as an OCI-compliant container image, as a read-only and immutable +volume inside a running pod at a known filesystem path. + +You can make the extension available either globally, using the +[`shared_preload_libraries` option](postgresql_conf.md#shared-preload-libraries), +or at the database level through the `CREATE EXTENSION` command. 
For the
+latter, you can use the [`Database` resource’s declarative extension management](declarative_database_management.md/#managing-extensions-in-a-database)
+to ensure consistent, automated extension setup within your PostgreSQL
+databases.
+
+## Benefits
+
+Image volume extensions decouple the distribution of PostgreSQL operand
+container images from the distribution of extensions. This eliminates the
+need to define and embed extensions at build time within your PostgreSQL
+images—a major adoption blocker for PostgreSQL as a containerized workload,
+including from a security and supply chain perspective.
+
+As a result, you can:
+
+- Use the [official PostgreSQL `minimal` operand images](https://github.com/cloudnative-pg/postgres-containers?tab=readme-ov-file#minimal-images)
+  provided by CloudNativePG.
+- Dynamically add the extensions you need to your `Cluster` definitions,
+  without rebuilding or maintaining custom PostgreSQL images.
+- Reduce your operational surface by using immutable, minimal, and secure base
+  images while adding only the extensions required for each workload.
+
+Extension images must be built according to the
+[documented specifications](#image-specifications).
+
+## Requirements
+
+To use image volume extensions with CloudNativePG, you need:
+
+- **PostgreSQL 18 or later**, with support for `extension_control_path`.
+- **Kubernetes 1.33**, with the `ImageVolume` feature gate enabled.
+- **CloudNativePG-compatible extension container images**, ensuring:
+    - A PostgreSQL major version matching that of the `Cluster` resource.
+    - An operating system distribution compatible with that of the `Cluster` resource.
+    - A CPU architecture matching that of the `Cluster` resource.
+
+## How it works
+
+Extension images are defined in the `.spec.postgresql.extensions` stanza of a
+`Cluster` resource, which accepts an ordered list of extensions to be added to
+the PostgreSQL cluster.
+
+!!! Info
+    For field-level details, see the
+    [API reference for `ExtensionConfiguration`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ExtensionConfiguration).
+
+Each image volume is mounted at `/extensions/<name>`, where `<name>` is the
+extension's `name` in the `extensions` list.
+
+By default, CloudNativePG automatically manages the relevant GUCs, setting:
+
+- `extension_control_path` to `/extensions/<name>/share`, allowing
+  PostgreSQL to locate any extension control file within `/extensions/<name>/share/extension`
+- `dynamic_library_path` to `/extensions/<name>/lib`
+
+These values are appended in the order in which the extensions are defined in
+the `extensions` list, ensuring deterministic path resolution within
+PostgreSQL. This allows PostgreSQL to discover and load the extension without
+requiring manual configuration inside the pod.
+
+!!! Info
+    Depending on how your extension container images are built and their layout,
+    you may need to adjust the default `extension_control_path` and
+    `dynamic_library_path` values to match the image structure.
+
+!!! Important
+    If the extension image includes shared libraries, they must be compiled
+    with the same PostgreSQL major version, operating system distribution, and CPU
+    architecture as the PostgreSQL container image used by your cluster, to ensure
+    compatibility and prevent runtime issues.
+
+## How to add a new extension
+
+Adding an extension to a database in CloudNativePG involves a few steps:
+
+1. Define the extension image in the `Cluster` resource so that PostgreSQL can
+   discover and load it.
+2. Add the library to [`shared_preload_libraries`](postgresql_conf.md#shared-preload-libraries)
+   if the extension requires it.
+3.
Declare the extension in the `Database` resource where you want it + installed, if the extension supports `CREATE EXTENSION`. + +!!! Warning + Avoid making changes to extension images and PostgreSQL configuration + settings (such as `shared_preload_libraries`) simultaneously. + First, allow the pod to roll out with the new extension image, then update + the PostgreSQL configuration. + This limitation will be addressed in a future release of CloudNativePG. + +For illustration purposes, this guide uses a simple, fictitious extension named +`foo` that supports `CREATE EXTENSION`. + +### Adding a new extension to a `Cluster` resource + +You can add an `ImageVolume`-based extension to a `Cluster` using the +`.spec.postgresql.extensions` stanza. For example: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: foo-18 +spec: + # ... + postgresql: + extensions: + - name: foo + image: + reference: # registry path for your extension image + # ... +``` + +The `name` field is **mandatory** and **must be unique within the cluster**, as +it determines the mount path (`/extensions/foo` in this example). It must +consist of *lowercase alphanumeric characters or hyphens (`-`)* and must start +and end with an alphanumeric character. + +The `image` stanza follows the [Kubernetes `ImageVolume` API](https://kubernetes.io/docs/tasks/configure-pod-container/image-volumes/). +The `reference` must point to a valid container registry path for the extension +image. + +!!! Important + When a new extension is added to a running `Cluster`, CloudNativePG will + automatically trigger a [rolling update](rolling_update.md) to attach the new + image volume to each pod. Before adding a new extension in production, + ensure you have thoroughly tested it in a staging environment to prevent + configuration issues that could leave your PostgreSQL cluster in an unhealthy + state. + +Once mounted, CloudNativePG will automatically configure PostgreSQL by appending: + +- `/extensions/foo/share` to `extension_control_path` +- `/extensions/foo/lib` to `dynamic_library_path` + +This ensures that the PostgreSQL container is ready to serve the `foo` +extension when requested by a database, as described in the next section. The +`CREATE EXTENSION foo` command, triggered automatically during the +[reconciliation of the `Database` resource](declarative_database_management.md/#managing-extensions-in-a-database), +will work without additional configuration, as PostgreSQL will locate: + +- the extension control file at `/extensions/foo/share/extension/foo.control` +- the shared library at `/extensions/foo/lib/foo.so` + +### Adding a new extension to a `Database` resource + +Once the extension is available in the PostgreSQL instance, you can leverage +declarative databases to [manage the lifecycle of your extensions](declarative_database_management.md#managing-extensions-in-a-database) +within the target database. + +Continuing with the `foo` example, you can request the installation of the +`foo` extension in the `app` database of the `foo-18` cluster using the +following resource definition: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: foo-app +spec: + name: app + owner: app + cluster: + name: foo-18 + extensions: + - name: foo + version: 1.0 +``` + +CloudNativePG will automatically reconcile this resource, executing the +`CREATE EXTENSION foo` command inside the `app` database if it is not +already installed, ensuring your desired state is maintained without manual +intervention. 
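+
+You can check the outcome of the reconciliation through the `status.applied`
+field described earlier (a sketch; the output shown is illustrative):
+
+```sh
+kubectl get database foo-app \
+  -o jsonpath='{.status.applied}{"\n"}'
+# true
+```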
+
+## Advanced Topics
+
+In some cases, the default expected structure may be insufficient for your
+extension image, particularly when:
+
+- The extension requires additional system libraries.
+- Multiple extensions are bundled in the same image.
+- The image uses a custom directory structure.
+
+Following the *"convention over configuration"* paradigm, CloudNativePG allows
+you to finely control the configuration of each extension image through the
+following fields:
+
+- `extension_control_path`: A list of relative paths within the container image
+  to be appended to PostgreSQL’s `extension_control_path`, allowing it to
+  locate extension control files.
+- `dynamic_library_path`: A list of relative paths within the container image
+  to be appended to PostgreSQL’s `dynamic_library_path`, enabling it to locate
+  shared library files for extensions.
+- `ld_library_path`: A list of relative paths within the container image to be
+  appended to the `LD_LIBRARY_PATH` environment variable of the instance
+  manager process, allowing PostgreSQL to locate required system libraries at
+  runtime.
+
+This flexibility enables you to support complex or non-standard extension
+images while maintaining clarity and predictability.
+
+### Setting Custom Paths
+
+If your extension image does not use the default `lib` and `share` directories
+for its libraries and control files, you can override the defaults by
+explicitly setting `extension_control_path` and `dynamic_library_path`.
+
+For example:
+
+```yaml
+spec:
+  postgresql:
+    extensions:
+      - name: my-extension
+        extension_control_path:
+          - my/share/path
+        dynamic_library_path:
+          - my/lib/path
+        image:
+          reference: # registry path for your extension image
+```
+
+CloudNativePG will configure PostgreSQL with:
+
+- `/extensions/my-extension/my/share/path` appended to `extension_control_path`
+- `/extensions/my-extension/my/lib/path` appended to `dynamic_library_path`
+
+This allows PostgreSQL to discover your extension’s control files and shared
+libraries correctly, even with a non-standard layout.
+
+### Multi-extension Images
+
+You may need to include multiple extensions within the same container image,
+adopting a structure where each extension’s files reside in their own
+subdirectory.
+
+For example, to package PostGIS and pgRouting together in a single image, each
+in its own subdirectory:
+
+```yaml
+# ...
+spec:
+  # ...
+  postgresql:
+    extensions:
+      - name: geospatial
+        extension_control_path:
+          - postgis/share
+          - pgrouting/share
+        dynamic_library_path:
+          - postgis/lib
+          - pgrouting/lib
+        # ...
+        image:
+          reference: # registry path for your geospatial image
+        # ...
+    # ...
+# ...
+```
+
+### Including System Libraries
+
+Some extensions, such as PostGIS, require system libraries that may not be
+present in the base PostgreSQL image. To support these requirements, you can
+package the necessary libraries within your extension container image and make
+them available to PostgreSQL using the `ld_library_path` field.
+
+For example, if your extension image includes a `system` directory with the
+required libraries:
+
+```yaml
+# ...
+spec:
+  # ...
+  postgresql:
+    extensions:
+      - name: postgis
+        # ...
+        ld_library_path:
+          - system
+        image:
+          reference: # registry path for your PostGIS image
+        # ...
+    # ...
+# ...
+```
+
+CloudNativePG will set the `LD_LIBRARY_PATH` environment variable to include
+`/extensions/postgis/system`, allowing PostgreSQL to locate and load these
+system libraries at runtime.
+
+!!!
Important
+    Since `ld_library_path` must be set when the PostgreSQL process starts,
+    changing this value requires a **cluster restart** for the new value to take effect.
+    CloudNativePG does not currently trigger this restart automatically; you will need to
+    manually restart the cluster (e.g., using `kubectl cnpg restart`) after modifying `ld_library_path`.
+
+## Image Specifications
+
+A standard extension container image for CloudNativePG includes two
+required directories at its root:
+
+- `share`: contains the extension control file (e.g., `<extension>.control`)
+  and any SQL files.
+- `lib`: contains the extension's shared library (e.g., `<extension>.so`) and
+  any additional required libraries.
+
+Following this structure ensures that the extension will be automatically
+discoverable and usable by PostgreSQL within CloudNativePG without requiring
+manual configuration.
+
+!!! Important
+    We encourage PostgreSQL extension developers to publish OCI-compliant extension
+    images following this layout as part of their artifact distribution, making
+    their extensions easily consumable within Kubernetes environments.
+    Ideally, extension images should target a specific operating system
+    distribution and architecture, be tied to a particular PostgreSQL version, and
+    be built using the distribution’s native packaging system (for example, using
+    Debian or RPM packages). This approach ensures consistency, security, and
+    compatibility with the PostgreSQL images used in your clusters.
+
+## Caveats
+
+Currently, adding, removing, or updating an extension image triggers a
+restart of the PostgreSQL pods. This behavior is inherited from how
+[image volumes](https://kubernetes.io/docs/tasks/configure-pod-container/image-volumes/)
+work in Kubernetes.
+
+Before performing an extension update, ensure you have:
+
+- Thoroughly tested the update process in a staging environment.
+- Verified that the extension image contains the required upgrade path between
+  the currently installed version and the target version.
+- Updated the `version` field for the extension in the relevant `Database`
+  resource definition to align with the new version in the image.
+
+These steps help prevent downtime or data inconsistencies in your PostgreSQL
+clusters during extension updates.
diff --git a/docs/src/index.md b/docs/src/index.md
index 52b51824be..b2af79086a 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -1,36 +1,43 @@
 # CloudNativePG
+
-**CloudNativePG** is an open-source [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) designed to manage [PostgreSQL](https://www.postgresql.org/) workloads on any
-supported [Kubernetes](https://kubernetes.io) cluster. It supports deployment in
-private, public, hybrid, and multi-cloud environments, thanks to
-its [distributed topology](replica_cluster.md#distributed-topology)
-feature.
-
-CloudNativePG adheres to DevOps principles and concepts such as declarative
-configuration and immutable infrastructure.
-
-It defines a new Kubernetes resource called `Cluster` representing a PostgreSQL
-cluster made up of a single primary and an optional number of replicas that co-exist
-in a chosen Kubernetes namespace for High Availability and offloading of
-read-only queries.
-
-Applications that reside in the same Kubernetes cluster can access the
-PostgreSQL database using a service solely managed by the operator, without
-needing to worry about changes in the primary role following a failover or
-switchover.
Applications that reside outside the Kubernetes cluster can -leverage the service template capability and a `LoadBalancer` service to expose -PostgreSQL via TCP. Additionally, web applications can take advantage of the -native connection pooler based on PgBouncer. +supported [Kubernetes](https://kubernetes.io) cluster. +It fosters cloud-neutrality through seamless deployment in private, public, +hybrid, and multi-cloud environments via its +[distributed topology](replica_cluster.md#distributed-topology) feature. + +Built around DevOps principles, CloudNativePG embraces declarative +configuration and immutable infrastructure, ensuring reliability and automation +in database management. + +At its core, CloudNativePG introduces a custom Kubernetes resource called +`Cluster`, representing a PostgreSQL cluster with: + +- A single primary instance for write operations. +- Optional replicas for High Availability and read scaling. + +These instances reside within a Kubernetes namespace, allowing applications to +connect seamlessly using operator-managed services. Failovers and switchovers +occur transparently, eliminating the need for manual intervention. + +For applications inside the Kubernetes cluster, CNPG provides a microservice +database approach, enabling co-location of PostgreSQL clusters and applications +in the same namespace for optimized access. +For applications outside the cluster, CNPG offers flexible connectivity through +service templates and `LoadBalancer` services for direct TCP exposure. +Additionally, web applications can take advantage of the native connection +pooler based on PgBouncer. CloudNativePG was originally built by [EDB](https://www.enterprisedb.com), then released open source under Apache License 2.0. -The [source code repository is in Github](https://github.com/cloudnative-pg/cloudnative-pg). +The [source code repository is in GitHub](https://github.com/cloudnative-pg/cloudnative-pg). !!! Note Based on the [Operator Capability Levels model](operator_capability_levels.md), - users can expect a **"Level V - Auto Pilot"** set of capabilities from the + users can expect a "Level V - Auto Pilot" subset of capabilities from the CloudNativePG Operator. ## Supported Kubernetes distributions @@ -54,10 +61,12 @@ in three different flavors: - Debian 12 distroless - Red Hat UBI 9 micro (suffix `-ubi9`) -- Red Hat UBI 8 micro (suffix `-ubi8`) Red Hat UBI images are primarily intended for OLM consumption. +All container images are signed and include SBOM and provenance attestations, +provided separately for each architecture. + ### Operands The PostgreSQL operand container images are available for all @@ -65,79 +74,115 @@ The PostgreSQL operand container images are available for all across multiple architectures, directly from the [`postgres-containers` project's GitHub Container Registry](https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql). -Daily jobs ensure that critical vulnerabilities (CVEs) in the entire stack are +The [`minimal`](https://github.com/cloudnative-pg/postgres-containers#minimal-images) +and [`standard`](https://github.com/cloudnative-pg/postgres-containers#standard-images) +container images are signed and include SBOM and provenance attestations, +provided separately for each architecture. + +Weekly jobs ensure that critical vulnerabilities (CVEs) in the entire stack are promptly addressed. Additionally, the community provides images for the [PostGIS extension](postgis.md). 
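+
+As a rough illustration of verifying the signed images mentioned above,
+signatures can be inspected with tools such as
+[`cosign`](https://github.com/sigstore/cosign); the identity values below are
+assumptions made for the sake of the example and must be checked against the
+project's release documentation:
+
+```sh
+cosign verify \
+  --certificate-oidc-issuer 'https://token.actions.githubusercontent.com' \
+  --certificate-identity-regexp 'https://github.com/cloudnative-pg/' \
+  ghcr.io/cloudnative-pg/postgresql:17.5
+```
+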
## Main features -* Direct integration with Kubernetes API server for High Availability, - without requiring an external tool -* Self-Healing capability, through: - * failover of the primary instance by promoting the most aligned replica - * automated recreation of a replica -* Planned switchover of the primary instance by promoting a selected replica -* Scale up/down capabilities -* Definition of an arbitrary number of instances (minimum 1 - one primary server) -* Definition of the *read-write* service, to connect your applications to the only primary server of the cluster -* Definition of the *read-only* service, to connect your applications to any of the instances for reading workloads -* Declarative management of PostgreSQL configuration, including certain popular - Postgres extensions through the cluster `spec`: `pgaudit`, `auto_explain`, - `pg_stat_statements`, and `pg_failover_slots` -* Declarative management of Postgres roles, users and groups -* Support for Local Persistent Volumes with PVC templates -* Reuse of Persistent Volumes storage in Pods -* Separate volumes for WAL files and tablespaces -* Declarative management of Postgres tablespaces, including temporary tablespaces -* Rolling updates for PostgreSQL minor versions -* In-place or rolling updates for operator upgrades -* TLS connections and client certificate authentication -* Support for custom TLS certificates (including integration with cert-manager) -* Continuous WAL archiving to an object store (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) -* Backups on volume snapshots (where supported by the underlying storage classes) -* Backups on object stores (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) -* Full recovery and Point-In-Time recovery from an existing backup on volume snapshots or object stores -* Offline import of existing PostgreSQL databases, including major upgrades of PostgreSQL -* Online import of existing PostgreSQL databases, including major upgrades of PostgreSQL, through PostgreSQL native logical replication (imperative, via the `cnpg` plugin) -* Fencing of an entire PostgreSQL cluster, or a subset of the instances in a declarative way -* Hibernation of a PostgreSQL cluster in a declarative way -* Support for quorum-based and priority-based Synchronous Replication -* Support for HA physical replication slots at cluster level -* Synchronization of user defined physical replication slots -* Backup from a standby -* Backup retention policies (based on recovery window, only on object stores) -* Parallel WAL archiving and restore to allow the database to keep up with WAL - generation on high write systems -* Support tagging backup files uploaded to an object store to enable optional - retention management at the object store layer -* Replica clusters for PostgreSQL distributed topologies spanning multiple - Kubernetes clusters, enabling private, public, hybrid, and multi-cloud - architectures with support for controlled switchover. 
-* Delayed Replica clusters -* Connection pooling with PgBouncer -* Support for node affinity via `nodeSelector` -* Native customizable exporter of user defined metrics for Prometheus through the `metrics` port (9187) -* Standard output logging of PostgreSQL error messages in JSON format -* Automatically set `readOnlyRootFilesystem` security context for pods -* `cnpg` plugin for `kubectl` -* Simple bind and search+bind LDAP client authentication -* Multi-arch format container images -* OLM installation +- Direct integration with the Kubernetes API server for High Availability, + eliminating the need for external tools. +- Self-healing capabilities, including: + - Automated failover by promoting the most aligned replica. + - Automatic recreation of failed replicas. +- Planned switchover of the primary instance by promoting a selected replica. +- Declarative management of key PostgreSQL configurations, including: + - PostgreSQL settings. + - Roles, users, and groups. + - Databases, extensions, schemas, and foreign data wrappers (FDW). + - Tablespaces (including temporary tablespaces). +- Flexible instance definition, supporting any number of instances (minimum 1 + primary server). +- Scale-up/down capabilities to dynamically adjust cluster size. +- Read-Write and Read-Only Services, ensuring applications connect correctly: + - *Read-Write Service*: Routes connections to the primary server. + - *Read-Only Service*: Distributes connections among replicas for read workloads. +- Support for quorum-based and priority-based PostgreSQL Synchronous + Replication. +- Replica clusters enabling PostgreSQL distributed topologies across multiple + Kubernetes clusters (private, public, hybrid, and multi-cloud). +- Delayed Replica clusters for point-in-time access to historical data. +- Persistent volume management, including: + - Support for Local Persistent Volumes with PVC templates. + - Reuse of Persistent Volumes storage in Pods. + - Separate volumes for WAL files and tablespaces. +- Backup and Recovery via CNPG-I Plugins: + - Pluggable architecture for continuous physical backup and recovery. + - Hot and cold base backups. + - WAL archiving. + - Full and Point-In-Time Recovery (PITR). + - Scheduled and on-demand backups. + - Backup from standbys to reduce primary load. +- Community-Supported Barman Cloud Plugin: + - WAL archiving to object stores with support for full/PITR recovery. + - Retention policies based on configurable recovery windows. + - Supported as a CNPG-I plugin (recommended approach). +- Native Backup Methods: + - Continuous backup and full/PITR recovery via volume snapshots (if + supported by the storage class). + - Native integration with Barman Cloud for object store backups via + `.spec.backup.barmanObjectStore` (*deprecated since v1.26*). +- Offline in-place major upgrades of PostgreSQL +- Offline and online import of PostgreSQL databases, including major upgrades: + - *Offline Import*: Direct restore from existing databases. + - *Online Import*: PostgreSQL native logical replication via the `Subscription` resource. +- High Availability physical replication slots, including synchronization of + user-defined replication slots and logical decoding failover. +- Parallel WAL archiving and restore, ensuring high-performance data + synchronization in high-write environments. +- TLS support, including: + - Secure connections and client certificate authentication. + - Custom TLS certificates (integrated with `cert-manager`). 
+- Startup and readiness probes, including replica probes based on desired lag
+  from the primary.
+- Declarative rolling updates for:
+  - PostgreSQL minor versions.
+  - Operator upgrades (in-place or rolling updates).
+- Standard output logging of PostgreSQL error messages in JSON format for
+  easier integration with log aggregation tools.
+- Prometheus-compatible metrics exporter (`metrics` port 9187) for custom
+  monitoring.
+- `cnpg` plugin for `kubectl` to simplify cluster operations.
+- Cluster hibernation for resource efficiency in inactive states.
+- Fencing of PostgreSQL clusters (full cluster or subset) to isolate instances
+  when needed.
+- Connection pooling with PgBouncer for improved database efficiency.
+- OLM (Operator Lifecycle Manager) installation support for streamlined
+  deployments.
+- Multi-arch container images, including Software Bill of Materials (SBOM) and
+  provenance attestations for security compliance.

!!! Info
    CloudNativePG does not use `StatefulSet`s for managing data persistence.
-    Rather, it manages persistent volume claims (PVCs) directly. If you are
-    curious, read ["Custom Pod Controller"](controller.md) to know more.
+    Instead, it directly manages Persistent Volume Claims (PVCs).
+    See ["Custom Pod Controller"](controller.md) for more details.

## About this guide

-Follow the instructions in the ["Quickstart"](quickstart.md) to test CloudNativePG
-on a local Kubernetes cluster using Kind, or Minikube.
+Follow the instructions in the ["Quickstart"](quickstart.md) to test
+CloudNativePG on a local Kubernetes cluster using Kind or Minikube.

In case you are not familiar with some basic terminology on Kubernetes and
PostgreSQL, please consult the ["Before you start" section](before_you_start.md).

-*[Postgres, PostgreSQL and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/)
+The CloudNativePG documentation is licensed under a Creative Commons
+Attribution 4.0 International License.
+
+---
+
+*[Postgres, PostgreSQL, and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/)
are trademarks or registered trademarks of the PostgreSQL Community Association
of Canada, and used with their permission.*
+
+---
+
+CloudNativePG is a
+[Cloud Native Computing Foundation Sandbox project](https://www.cncf.io/sandbox-projects/).
+
+![](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png?raw=true)
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index f6a7e1b14b..4f6919df69 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -1,4 +1,5 @@
# Installation and upgrades
+

## Installation on Kubernetes

@@ -7,18 +8,19 @@ The operator can be installed like any other resource in Kubernetes,
through a YAML manifest applied via `kubectl`.
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.24.0.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.27/releases/cnpg-1.27.0.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.24.0.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.27/releases/cnpg-1.27.0.yaml ``` You can verify that with: ```sh -kubectl get deployment -n cnpg-system cnpg-controller-manager +kubectl rollout status deployment \ + -n cnpg-system cnpg-controller-manager ``` ### Using the `cnpg` plugin for `kubectl` @@ -72,7 +74,7 @@ specific minor release, you can just run: ```sh curl -sSfL \ - https://raw.githubusercontent.com/cloudnative-pg/artifacts/main/manifests/operator-manifest.yaml | \ + https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.27/manifests/operator-manifest.yaml | \ kubectl apply --server-side -f - ``` @@ -86,10 +88,13 @@ The operator can be installed using the provided [Helm chart](https://github.com ### Using OLM -CloudNativePG can also be installed using the -[Operator Lifecycle Manager (OLM)](https://olm.operatorframework.io/docs/) +CloudNativePG can also be installed via the [Operator Lifecycle Manager (OLM)](https://olm.operatorframework.io/docs/) directly from [OperatorHub.io](https://operatorhub.io/operator/cloudnative-pg). +For deployments on Red Hat OpenShift, EDB offers and fully supports a certified +version of CloudNativePG, available through the +[Red Hat OpenShift Container Platform](https://catalog.redhat.com/software/container-stacks/detail/653fd4035eece8598f66d97b). + ## Details about the deployment In Kubernetes, the operator is by default installed in the `cnpg-system` @@ -152,13 +157,14 @@ by applying the manifest of the newer version for plain Kubernetes installations, or using the native package manager of the used distribution (please follow the instructions in the above sections). -The second step is automatically executed after having updated the controller, -by default triggering a rolling update of every deployed PostgreSQL instance to -use the new instance manager. The rolling update procedure culminates with a -switchover, which is controlled by the `primaryUpdateStrategy` option, by -default set to `unsupervised`. When set to `supervised`, users need to complete -the rolling update by manually promoting a new instance through the `cnpg` -plugin for `kubectl`. + +The second step is automatically triggered after updating the controller. By +default, this initiates a rolling update of every deployed PostgreSQL cluster, +upgrading one instance at a time to use the new instance manager. The rolling +update concludes with a switchover, which is governed by the +`primaryUpdateStrategy` option. The default value, `unsupervised`, completes +the switchover automatically. If set to `supervised`, the user must manually +promote the new primary instance using the `cnpg` plugin for `kubectl`. !!! Seealso "Rolling updates" This process is discussed in-depth on the [Rolling Updates](rolling_update.md) page. @@ -166,13 +172,31 @@ plugin for `kubectl`. !!! Important In case `primaryUpdateStrategy` is set to the default value of `unsupervised`, an upgrade of the operator will trigger a switchover on your PostgreSQL cluster, - causing a (normally negligible) downtime. 
+    causing a (normally negligible) downtime. If your PostgreSQL Cluster has only one
+    instance, the instance will be automatically restarted, as the `supervised` value is
+    not supported for `primaryUpdateStrategy`. In either case, your applications will
+    have to reconnect to PostgreSQL.

The default rolling update behavior can be replaced with in-place updates of
the instance manager. This approach does not require a restart of the
PostgreSQL instance, thereby avoiding a switchover within the cluster.
This feature, which is disabled by default, is described in detail below.

+### Spread Upgrades
+
+By default, all PostgreSQL clusters are rolled out simultaneously, which may
+lead to a spike in resource usage, especially when managing multiple clusters.
+CloudNativePG provides two configuration options at the [operator level](operator_conf.md)
+that allow you to introduce delays between cluster roll-outs or even between
+instances within the same cluster, helping to distribute resource usage over
+time:
+
+- `CLUSTERS_ROLLOUT_DELAY`: Defines the number of seconds to wait between
+  roll-outs of different PostgreSQL clusters (default: `0`).
+- `INSTANCES_ROLLOUT_DELAY`: Defines the number of seconds to wait between
+  roll-outs of individual instances within the same PostgreSQL cluster (default:
+  `0`).
+
### In-place updates of the instance manager

By default, CloudNativePG issues a rolling update of the cluster
@@ -229,19 +253,115 @@ When versions are not directly upgradable, the old version needs to be
removed before installing the new one. This won't affect user data but
only the operator itself.

-### Upgrading to 1.24.0 or 1.23.4
+
+
+
+### Upgrading to 1.27.0 or 1.26.1
+
+!!! Important
+    We strongly recommend that all CloudNativePG users upgrade to version
+    1.27.0, or at least to the latest stable version of your current minor release
+    (e.g., 1.26.1).
+
+Version 1.27 introduces a change in the default behavior of the
+[liveness probe](instance_manager.md#liveness-probe): it now enforces the
+[shutdown of an isolated primary](instance_manager.md#primary-isolation)
+within the `livenessProbeTimeout` (30 seconds).
+
+If this behavior is not suitable for your environment, you can disable the
+*isolation check* in the liveness probe with the following configuration:
+
+```yaml
+spec:
+  probes:
+    liveness:
+      isolationCheck:
+        enabled: false
+```
+
+### Upgrading to 1.26 from a previous minor version
+
+!!! Important
+    We strongly recommend that all CloudNativePG users upgrade to version
+    1.26.1, or at a minimum, to the latest stable version of your current minor
+    release (for example, 1.25.x).
+
+!!! Warning
+    Due to changes in the startup probe for the manager component
+    ([#6623](https://github.com/cloudnative-pg/cloudnative-pg/pull/6623)),
+    upgrading the operator will trigger a restart of your PostgreSQL clusters,
+    even if in-place updates are enabled (`ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES=true`).
+    Your applications will need to reconnect to PostgreSQL after the upgrade.
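+
+If the simultaneous restart of many clusters during this upgrade is a concern,
+the roll-out delay options described in the ["Spread Upgrades"](#spread-upgrades)
+section above can help stagger the restarts. As a minimal sketch, assuming the
+operator reads its configuration from the default `cnpg-controller-manager-config`
+ConfigMap in the `cnpg-system` namespace (see the
+[operator configuration](operator_conf.md) page), you could set:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cnpg-controller-manager-config
+  namespace: cnpg-system
+data:
+  # Hypothetical values: wait one minute between roll-outs of different
+  # clusters, and ten seconds between instances of the same cluster.
+  CLUSTERS_ROLLOUT_DELAY: "60"
+  INSTANCES_ROLLOUT_DELAY: "10"
+```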
+ +#### Deprecation of backup metrics and fields in the `Cluster` `.status` + +With the transition to a backup and recovery agnostic approach based on CNPG-I +plugins in CloudNativePG, which began with version 1.26.0 for Barman Cloud, we +are starting the deprecation period for the following fields in the `.status` +section of the `Cluster` resource: + +- `firstRecoverabilityPoint` +- `firstRecoverabilityPointByMethod` +- `lastSuccessfulBackup` +- `lastSuccessfulBackupByMethod` +- `lastFailedBackup` + +The following Prometheus metrics are also deprecated: + +- `cnpg_collector_first_recoverability_point` +- `cnpg_collector_last_failed_backup_timestamp` +- `cnpg_collector_last_available_backup_timestamp` + +!!! Warning + If you have migrated to a plugin-based backup and recovery solution such as + Barman Cloud, these fields and metrics are no longer synchronized and will + not be updated. Users still relying on the in-core support for Barman Cloud + and volume snapshots can continue to use these fields for the time being. + +Under the new plugin-based approach, multiple backup methods can operate +simultaneously, each with its own timeline for backup and recovery. For +example, some plugins may provide snapshots without WAL archiving, while others +support continuous archiving. + +Because of this flexibility, maintaining centralized status fields in the +`Cluster` resource could be misleading or confusing, as they would not +accurately represent the state across all configured backup methods. +For this reason, these fields are being deprecated. + +Instead, each plugin is responsible for exposing its own backup status +information and providing metrics back to the instance manager for monitoring +and operational awareness. + +#### Declarative Hibernation in the `cnpg` plugin + +In this release, the `cnpg` plugin for `kubectl` transitions from an imperative +to a [declarative approach for cluster hibernation](declarative_hibernation.md). +The `hibernate on` and `hibernate off` commands are now convenient shortcuts +that apply declarative changes to enable or disable hibernation. +The `hibernate status` command has been removed, as its purpose is now +fulfilled by the standard `status` command. + +### Upgrading to 1.25 from a previous minor version !!! Warning Every time you are upgrading to a higher minor release, make sure you go through the release notes and upgrade instructions of all the intermediate minor releases. For example, if you want to move - from 1.22.x to 1.24, make sure you go through the release notes - and upgrade instructions for 1.23 and 1.24. + from 1.23.x to 1.25, make sure you go through the release notes + and upgrade instructions for 1.24 and 1.25. + +No changes to existing 1.24 cluster configurations are required when upgrading +to 1.25. + +### Upgrading to 1.24 from a previous minor version #### From Replica Clusters to Distributed Topology @@ -282,63 +402,3 @@ distributed PostgreSQL setup. Ensure the following steps are taken: For more information, please refer to the ["Distributed Topology" section for replica clusters](replica_cluster.md#distributed-topology). - -### Upgrading to 1.23 from a previous minor version - -#### User defined replication slots - -CloudNativePG now offers automated synchronization of all replication slots -defined on the primary to any standby within the High Availability (HA) -cluster. - -If you manually manage replication slots on a standby, it is essential to -exclude those replication slots from synchronization. 
Failure to do so may -result in CloudNativePG removing them from the standby. To implement this -exclusion, utilize the following YAML configuration. In this example, -replication slots with a name starting with 'foo' are prevented from -synchronization: - -```yaml -... - replicationSlots: - synchronizeReplicas: - enabled: true - excludePatterns: - - "^foo" -``` - -Alternatively, if you prefer to disable the synchronization mechanism entirely, -use the following configuration: - -```yaml -... - replicationSlots: - synchronizeReplicas: - enabled: false -``` - -#### Server-side apply of manifests - -To ensure compatibility with Kubernetes 1.29 and upcoming versions, -CloudNativePG now mandates the utilization of -["Server-side apply"](https://kubernetes.io/docs/reference/using-api/server-side-apply/) -when deploying the operator manifest. - -While employing this installation method poses no challenges for new -deployments, updating existing operator manifests using the `--server-side` -option may result in errors resembling the example below: - -``` text -Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using.. -``` - -If such errors arise, they can be resolved by explicitly specifying the -`--force-conflicts` option to enforce conflict resolution: - -```sh -kubectl apply --server-side --force-conflicts -f -``` - -Henceforth, `kube-apiserver` will be automatically acknowledged as a recognized -manager for the CRDs, eliminating the need for any further manual intervention -on this matter. diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index d285838ce7..e7ac50f1d3 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -1,11 +1,12 @@ # Postgres instance manager + CloudNativePG does not rely on an external tool for failover management. It simply relies on the Kubernetes API server and a native key component called: the **Postgres instance manager**. The instance manager takes care of the entire lifecycle of the PostgreSQL -leading process (also known as `postmaster`). +server process (also known as `postmaster`). When you create a new cluster, the operator makes a Pod per instance. The field `.spec.instances` specifies how many instances to create. @@ -15,38 +16,311 @@ main container, which in turn runs the PostgreSQL instance. During the lifetime of the Pod, the instance manager acts as a backend to handle the [startup, liveness and readiness probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). -## Startup, liveness and readiness probes +## Startup Probe -The startup and liveness probes rely on `pg_isready`, while the readiness -probe checks if the database is up and able to accept connections using the -superuser credentials. +The startup probe ensures that a PostgreSQL instance, whether a primary or +standby, has fully started. -The readiness probe is positive when the Pod is ready to accept traffic. -The liveness probe controls when to restart the container once -the startup probe interval has elapsed. +!!! Info + By default, the startup probe uses + [`pg_isready`](https://www.postgresql.org/docs/current/app-pg-isready.html). + However, the behavior can be customized by specifying a different startup + strategy. + +While the startup probe is running, the liveness and readiness probes remain +disabled. Following Kubernetes standards, if the startup probe fails, the +kubelet will terminate the container, which will then be restarted. 
+ +The `.spec.startDelay` parameter specifies the maximum time, in seconds, +allowed for the startup probe to succeed. + +By default, the `startDelay` is set to `3600` seconds. It is recommended to +adjust this setting based on the time PostgreSQL needs to fully initialize in +your specific environment. + +!!! Warning + Setting `.spec.startDelay` too low can cause the liveness probe to activate + prematurely, potentially resulting in unnecessary Pod restarts if PostgreSQL + hasn’t fully initialized. + +CloudNativePG configures the startup probe with the following default parameters: + +```yaml +failureThreshold: FAILURE_THRESHOLD +periodSeconds: 10 +successThreshold: 1 +timeoutSeconds: 5 +``` + +The `failureThreshold` value is automatically calculated by dividing +`startDelay` by `periodSeconds`. + +You can customize any of the probe settings in the `.spec.probes.startup` +section of your configuration. + +!!! Warning + Be sure that any custom probe settings are tailored to your cluster's + operational requirements to avoid unintended disruptions. + +!!! Info + For more details on probe configuration, refer to the + [probe API documentation](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ProbeWithStrategy). + +If you manually specify `.spec.probes.startup.failureThreshold`, it will +override the default behavior and disable the automatic use of `startDelay`. + +For example, the following configuration explicitly sets custom probe +parameters, bypassing `startDelay`: + +```yaml +# ... snip +spec: + probes: + startup: + periodSeconds: 3 + timeoutSeconds: 3 + failureThreshold: 10 +``` + +### Startup Probe Strategy + +In certain scenarios, you may need to customize the startup strategy for your +PostgreSQL cluster. For example, you might delay marking a replica as started +until it begins streaming from the primary or define a replication lag +threshold that must be met before considering the replica ready. + +To accommodate these requirements, CloudNativePG extends the +`.spec.probes.startup` stanza with two optional parameters: + +- `type`: specifies the criteria for considering the probe successful. Accepted + values, in increasing order of complexity/depth, include: + + - `pg_isready`: marks the probe as successful when the `pg_isready` command + exits with `0`. This is the default for primary instances and replicas. + - `query`: marks the probe as successful when a basic query is executed on + the `postgres` database locally. + - `streaming`: marks the probe as successful when the replica begins + streaming from its source and meets the specified lag requirements (details + below). + +- `maximumLag`: defines the maximum acceptable replication lag, measured in bytes + (expressed as Kubernetes quantities). This parameter is only applicable when + `type` is set to `streaming`. If `maximumLag` is not specified, the replica is + considered successfully started as soon as it begins streaming. + +!!! Important + The `.spec.probes.startup.maximumLag` option is validated and enforced only + during the startup phase of the pod, meaning it applies exclusively when the + replica is starting. + +!!! Warning + Incorrect configuration of the `maximumLag` option can cause continuous + failures of the startup probe, leading to repeated replica restarts. Ensure + you understand how this option works and configure appropriate values for + `failureThreshold` and `periodSeconds` to give the replica enough time to + catch up with its source. 
+
+The following example requires a replica to have a maximum lag of 16Mi from the
+source to be considered started:
+
+```yaml
+#
+probes:
+  startup:
+    type: streaming
+    maximumLag: 16Mi
+```
+
+## Liveness Probe
+
+The liveness probe begins after the startup probe successfully completes. Its
+primary role is to ensure the PostgreSQL instance manager is operating
+correctly.
+
+Following Kubernetes standards, if the liveness probe fails, the kubelet will
+terminate the container, which will then be restarted.
+
+The amount of time before a Pod is classified as not alive is configurable via
+the `.spec.livenessProbeTimeout` parameter.
+
+CloudNativePG configures the liveness probe with the following default
+parameters:
+
+```yaml
+failureThreshold: FAILURE_THRESHOLD
+periodSeconds: 10
+successThreshold: 1
+timeoutSeconds: 5
+```
+
+The `failureThreshold` value is automatically calculated by dividing
+`livenessProbeTimeout` by `periodSeconds`.
+
+By default, `.spec.livenessProbeTimeout` is set to `30` seconds. This means the
+liveness probe will report a failure if it detects three consecutive probe
+failures, with a 10-second interval between each check.
+
+You can customize any of the probe settings in the `.spec.probes.liveness`
+section of your configuration.
+
+!!! Warning
+    Be sure that any custom probe settings are tailored to your cluster's
+    operational requirements to avoid unintended disruptions.
+
+!!! Info
+    For more details on probe configuration, refer to the
+    [probe API documentation](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe).
+
+If you manually specify `.spec.probes.liveness.failureThreshold`, it will
+override the default behavior and disable the automatic use of
+`livenessProbeTimeout`.
+
+For example, the following configuration explicitly sets custom probe
+parameters, bypassing `livenessProbeTimeout`:
+
+```yaml
+# ... snip
+spec:
+  probes:
+    liveness:
+      periodSeconds: 3
+      timeoutSeconds: 3
+      failureThreshold: 10
+```
+
+### Primary Isolation
+
+CloudNativePG 1.27 introduces an additional behavior for the liveness
+probe of a PostgreSQL primary, which will report a failure if **both** of the
+following conditions are met:
+
+1. The instance manager cannot reach the Kubernetes API server.
+2. The instance manager cannot reach **any** other instance via the instance
+   manager’s REST API.
+
+As a result, an isolated primary is considered not alive and is subsequently
+**shut down** when the liveness probe fails.
+
+This check is **enabled by default** and can be disabled by adding the
+following:
+
+```yaml
+spec:
+  probes:
+    liveness:
+      isolationCheck:
+        enabled: false
+```

!!! Important
-    The liveness and readiness probes will report a failure if the probe command
-    fails three times with a 10-second interval between each check.
+    Be aware that the default liveness probe settings, which are automatically
+    derived from `livenessProbeTimeout`, might be too aggressive (30 seconds).
+    As such, we recommend explicitly setting the liveness probe configuration
+    to suit your environment.
+
+The spec also accepts two optional network settings: `requestTimeout`
+and `connectionTimeout`, both defaulting to `1000` (in milliseconds).
+In cloud environments, you may need to increase these values.
+For example:
+
+```yaml
+spec:
+  probes:
+    liveness:
+      isolationCheck:
+        enabled: true
+        requestTimeout: "2000"
+        connectionTimeout: "2000"
+```
+
+## Readiness Probe
+
+The readiness probe starts once the startup probe has successfully completed.
+Its primary purpose is to check whether the PostgreSQL instance is ready to
+accept traffic and serve requests at any point during the pod's lifecycle.
+
+!!! Info
+    By default, the readiness probe uses
+    [`pg_isready`](https://www.postgresql.org/docs/current/app-pg-isready.html).
+    However, the behavior can be customized by specifying a different readiness
+    strategy.
+
+Following Kubernetes standards, if the readiness probe fails, the pod will be
+marked unready and will not receive traffic from any services. An unready pod
+is also ineligible for promotion during automated failover scenarios.
+
+CloudNativePG uses the following default configuration for the readiness probe:
+
+```yaml
+failureThreshold: 3
+periodSeconds: 10
+successThreshold: 1
+timeoutSeconds: 5
+```
+
+If the default settings do not suit your requirements, you can fully customize
+the readiness probe by specifying parameters in the `.spec.probes.readiness`
+stanza. For example:
+
+```yaml
+# ... snip
+spec:
+  probes:
+    readiness:
+      periodSeconds: 3
+      timeoutSeconds: 3
+      failureThreshold: 10
+```

-The liveness probe detects if the PostgreSQL instance is in a
-broken state and needs to be restarted. The value in `startDelay` is used
-to delay the probe's execution, preventing an
-instance with a long startup time from being restarted.
+!!! Warning
+    Ensure that any custom probe settings are aligned with your cluster’s
+    operational requirements to prevent unintended disruptions.

-The amount of time needed for a Pod to be classified as not alive is
-configurable in the `.spec.livenessProbeTimeout` parameter, that
-defaults to 30 seconds.
+!!! Info
+    For more information on configuring probes, see the
+    [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ProbeWithStrategy).

-The interval (in seconds) after the Pod has started before the liveness
-probe starts working is expressed in the `.spec.startDelay` parameter,
-which defaults to 3600 seconds. The correct value for your cluster is
-related to the time needed by PostgreSQL to start.
+### Readiness Probe Strategy
+
+In certain scenarios, you may need to customize the readiness strategy for your
+cluster. For example, you might delay marking a replica as ready until it
+begins streaming from the primary or define a maximum replication lag threshold
+before considering the replica ready.
+
+To accommodate these requirements, CloudNativePG extends the
+`.spec.probes.readiness` stanza with two optional parameters: `type` and
+`maximumLag`. Please refer to the [Startup Probe Strategy](#startup-probe-strategy)
+section for detailed information on these options.
+
+!!! Important
+    Unlike the startup probe, the `.spec.probes.readiness.maximumLag` option is
+    continuously monitored. A lagging replica may become unready if this setting is
+    not appropriately tuned.

!!! Warning
-    If `.spec.startDelay` is too low, the liveness probe will start working
-    before the PostgreSQL startup is complete, and the Pod could be restarted
-    prematurely.
+    Incorrect configuration of the `maximumLag` option can lead to repeated
+    readiness probe failures, causing serious consequences, such as:
+
+    - Exclusion of the replica from key operator features, such as promotion
+      during failover or participation in synchronous replication quorum.
+    - Disruptions in read/read-only services.
+    - In scenarios with longer failover times, replicas might be declared
+      unready, leading to a cluster stall requiring manual intervention.
+
+!!! Recommendation
+    Use the `streaming` and `maximumLag` options with extreme caution. If
+    you're unfamiliar with PostgreSQL replication, rely on the default
+    strategy. Seek professional advice if unsure.
+
+The following example requires a replica to have a maximum lag of 64Mi from the
+source to be considered ready. It also provides approximately 300 seconds (30
+failures × 10 seconds) for the readiness probe to succeed:
+
+```yaml
+#
+probes:
+  readiness:
+    type: streaming
+    maximumLag: 64Mi
+    failureThreshold: 30
+    periodSeconds: 10
+```

## Shutdown control

@@ -73,8 +347,9 @@ seconds.

!!! Important
    In order to avoid any data loss in the Postgres cluster, which impacts
-    the database RPO, don't delete the Pod where the primary instance is running.
-    In this case, perform a switchover to another instance first.
+    the database [RPO](before_you_start.md#rpo), don't delete the Pod where
+    the primary instance is running. In this case, perform a switchover to
+    another instance first.

### Shutdown of the primary during a switchover

@@ -88,11 +363,12 @@ the time given to the former primary to shut down gracefully and
archive all the WAL files. By default it is set to `3600` (1 hour).

!!! Warning
-    The `.spec.switchoverDelay` option affects the RPO and RTO of your
-    PostgreSQL database. Setting it to a low value, might favor RTO over RPO
-    but lead to data loss at cluster level and/or backup level. On the contrary,
-    setting it to a high value, might remove the risk of data loss while leaving
-    the cluster without an active primary for a longer time during the switchover.
+    The `.spec.switchoverDelay` option affects the [RPO](before_you_start.md#rpo)
+    and [RTO](before_you_start.md#rto) of your PostgreSQL database. Setting it to
+    a low value might favor RTO over RPO but lead to data loss at cluster level
+    and/or backup level. Conversely, setting it to a high value might remove
+    the risk of data loss while leaving the cluster without an active primary for a
+    longer time during the switchover.

## Failover

@@ -102,7 +378,7 @@ Please refer to the ["Failover" section](failover.md) for details.

## Disk Full Failure

Storage exhaustion is a well known issue for PostgreSQL clusters.
-The [PostgreSQL documentation](https://www.postgresql.org/docs/current/disk-full.html)
+The [PostgreSQL documentation](https://www.postgresql.org/docs/current/diskusage.html#DISK-FULL)
highlights the possible failure scenarios and the importance of monitoring disk
usage to prevent it from becoming full.

@@ -130,6 +406,7 @@ That allows a human administrator to address the root cause.

In such a case, if supported by the storage class, the quickest course of action
is currently to:
+
1. Expand the storage size of the full PVC
2. Increase the size in the `Cluster` resource to the same value

diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
old mode 100755
new mode 100644
index 4902585ae6..5526f009af
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -1,4 +1,5 @@
# Kubectl Plugin
+

CloudNativePG provides a plugin for `kubectl` to manage a cluster in
Kubernetes.

@@ -30,55 +31,72 @@ them in your systems.

#### Debian packages

-For example, let's install the 1.22.2 release of the plugin, for an Intel based
+For example, let's install the 1.27.0 release of the plugin for an Intel-based
64 bit server. First, we download the right `.deb` file.
-``` sh
-$ wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.22.1/kubectl-cnpg_1.22.2_linux_x86_64.deb
+```sh
+wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.27.0/kubectl-cnpg_1.27.0_linux_x86_64.deb \
+  --output-document kube-plugin.deb
```

-Then, install from the local file using `dpkg`:
+Then, with superuser privileges, install from the local file using `dpkg`:

-``` sh
-$ dpkg -i kubectl-cnpg_1.22.2_linux_x86_64.deb
-(Reading database ... 702524 files and directories currently installed.)
-Preparing to unpack kubectl-cnpg_1.22.2_linux_x86_64.deb ...
-Unpacking cnpg (1.22.2) over (1.22.2) ...
-Setting up cnpg (1.22.2) ..
+```console
+$ sudo dpkg -i kube-plugin.deb
+Selecting previously unselected package cnpg.
+(Reading database ... 6688 files and directories currently installed.)
+Preparing to unpack kube-plugin.deb ...
+Unpacking cnpg (1.27.0) ...
+Setting up cnpg (1.27.0) ...
```

#### RPM packages

-As in the example for `.deb` packages, let's install the 1.22.2 release for an
+As in the example for `.deb` packages, let's install the 1.27.0 release for an
Intel 64 bit machine. Note the `--output` flag to provide a file name.

-``` sh
-curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.22.2/kubectl-cnpg_1.22.2_linux_x86_64.rpm \
+```sh
+curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.27.0/kubectl-cnpg_1.27.0_linux_x86_64.rpm \
    --output kube-plugin.rpm
```

-Then install with `yum`, and you're ready to use:
+Then, with superuser privileges, install with `yum`, and you're ready to use:

-``` sh
-$ yum --disablerepo=* localinstall kube-plugin.rpm
-yum --disablerepo=* localinstall kube-plugin.rpm
+```console
+$ sudo yum --disablerepo=* localinstall kube-plugin.rpm
Failed to set locale, defaulting to C.UTF-8
Dependencies resolved.
====================================================================================================
 Package              Architecture         Version               Repository          Size
====================================================================================================
Installing:
- cnpg                 x86_64               1.22.2-1              @commandline        17 M
+ cnpg                 x86_64               1.27.0                @commandline        20 M

Transaction Summary
====================================================================================================
Install  1 Package

-Total size: 14 M
-Installed size: 43 M
+Total size: 20 M
+Installed size: 78 M
Is this ok [y/N]: y
```

+### Using the Arch Linux User Repository (AUR) Package
+
+To install the plugin from the [AUR](https://aur.archlinux.org/packages/kubectl-cnpg), follow these steps:
+
+```sh
+git clone https://aur.archlinux.org/kubectl-cnpg.git
+cd kubectl-cnpg
+makepkg -si
+```
+
+Or use your favorite AUR-helper, for example [paru](https://github.com/Morganamilo/paru):
+
+```sh
+paru -S kubectl-cnpg
+```
+
### Using Krew

If you already have [Krew](https://krew.sigs.k8s.io/) installed, you can simply

@@ -126,19 +144,19 @@ CloudNativePG Plugin is currently built for the following operating system and
architectures:

* Linux
-  * amd64
-  * arm 5/6/7
-  * arm64
-  * s390x
-  * ppc64le
+    * amd64
+    * arm 5/6/7
+    * arm64
+    * s390x
+    * ppc64le
* macOS
-  * amd64
-  * arm64
+    * amd64
+    * arm64
* Windows
-  * 386
-  * amd64
-  * arm 5/6/7
-  * arm64
+    * 386
+    * amd64
+    * arm 5/6/7
+    * arm64

### Configuring auto-completion

To configure auto-completion for the plugin, a helper shell script needs to be
Assuming the latter contains `/usr/local/bin`, this can be done with the following commands: -```shell +```sh cat > kubectl_complete-cnpg < +```sh +kubectl cnpg COMMAND [ARGS...] ``` !!! Note The plugin automatically detects if the standard output channel is connected to a terminal. In such cases, it may add ANSI colors to the command output. To disable colors, use the `--color=never` option with the command. + ### Generation of installation manifests The `cnpg` plugin can be used to generate the YAML manifest for the @@ -184,7 +203,7 @@ installation namespace, namespaces to watch, and so on. For details and available options, run: -```shell +```sh kubectl cnpg install generate --help ``` @@ -205,7 +224,7 @@ The main options are: An example of the `generate` command, which will generate a YAML manifest that will install the operator, is as follows: -```shell +```sh kubectl cnpg install generate \ -n king \ --version 1.23 \ @@ -245,163 +264,115 @@ cluster, including: from the `Current LSN` field in the instances status as it is taken at two different time intervals. -```shell +```sh kubectl cnpg status sandbox ``` -```shell -Cluster in healthy state -Name: sandbox -Namespace: default -System ID: 7039966298120953877 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 -Primary instance: sandbox-2 -Instances: 3 -Ready instances: 3 -Current Write LSN: 3AF/EAFA6168 (Timeline: 8 - WAL File: 00000008000003AF00000075) +```output +Cluster Summary +Name: default/sandbox +System ID: 7423474350493388827 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +Primary instance: sandbox-1 +Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 1m14s) +Status: Cluster in healthy state +Instances: 3 +Ready instances: 3 +Size: 126M +Current Write LSN: 0/604DE38 (Timeline: 1 - WAL File: 000000010000000000000006) Continuous Backup status -First Point of Recoverability: Not Available -Working WAL archiving: OK -Last Archived WAL: 00000008000003AE00000079 @ 2021-12-14T10:16:29.340047Z -Last Failed WAL: - - -Certificates Status -Certificate Name Expiration Date Days Left Until Expiration ----------------- --------------- -------------------------- -cluster-example-ca 2022-05-05 15:02:42 +0000 UTC 87.23 -cluster-example-replication 2022-05-05 15:02:42 +0000 UTC 87.23 -cluster-example-server 2022-05-05 15:02:42 +0000 UTC 87.23 +Not configured Streaming Replication status -Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority ----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- -sandbox-1 3AF/EB0524F0 3AF/EB011760 3AF/EAFEDE50 3AF/EAFEDE50 00:00:00.004461 00:00:00.007901 00:00:00.007901 streaming quorum 1 -sandbox-3 3AF/EB0524F0 3AF/EB030B00 3AF/EB030B00 3AF/EB011760 00:00:00.000977 00:00:00.004194 00:00:00.008252 streaming quorum 1 +Replication Slots Enabled +Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority Replication Slot +---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- ---------------- +sandbox-2 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00:00:00 streaming async 0 active +sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00:00:00 streaming async 0 active Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version ----- ------------- ----------- ---------------- ------ --- --------------- -sandbox-1 302 GB 
3AF/E9FFFFE0 Standby (sync) OK Guaranteed 1.11.0 -sandbox-2 302 GB 3AF/EAFA6168 Primary OK Guaranteed 1.11.0 -sandbox-3 302 GB 3AF/EBAD5D18 Standby (sync) OK Guaranteed 1.11.0 +Name Current LSN Replication role Status QoS Manager Version Node +---- ----------- ---------------- ------ --- --------------- ---- +sandbox-1 0/604DE38 Primary OK BestEffort 1.27.0 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.27.0 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.27.0 k8s-eu-worker ``` -You can also get a more verbose version of the status by adding -`--verbose` or just `-v` +If you require more detailed status information, use the `--verbose` option (or +`-v` for short). The level of detail increases each time the flag is repeated: -```shell +```sh kubectl cnpg status sandbox --verbose ``` -```shell -Cluster in healthy state -Name: sandbox -Namespace: default -System ID: 7039966298120953877 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 -Primary instance: sandbox-2 -Instances: 3 -Ready instances: 3 -Current Write LSN: 3B1/61DE3158 (Timeline: 8 - WAL File: 00000008000003B100000030) - -PostgreSQL Configuration -archive_command = '/controller/manager wal-archive --log-destination /controller/log/postgres.json %p' -archive_mode = 'on' -archive_timeout = '5min' -checkpoint_completion_target = '0.9' -checkpoint_timeout = '900s' -cluster_name = 'sandbox' -dynamic_shared_memory_type = 'sysv' -full_page_writes = 'on' -hot_standby = 'true' -jit = 'on' -listen_addresses = '*' -log_autovacuum_min_duration = '1s' -log_checkpoints = 'on' -log_destination = 'csvlog' -log_directory = '/controller/log' -log_filename = 'postgres' -log_lock_waits = 'on' -log_min_duration_statement = '1000' -log_rotation_age = '0' -log_rotation_size = '0' -log_statement = 'ddl' -log_temp_files = '1024' -log_truncate_on_rotation = 'false' -logging_collector = 'on' -maintenance_work_mem = '2GB' -max_connections = '1000' -max_parallel_workers = '32' -max_replication_slots = '32' -max_wal_size = '15GB' -max_worker_processes = '32' -pg_stat_statements.max = '10000' -pg_stat_statements.track = 'all' -port = '5432' -shared_buffers = '16GB' -shared_memory_type = 'sysv' -shared_preload_libraries = 'pg_stat_statements' -ssl = 'on' -ssl_ca_file = '/controller/certificates/client-ca.crt' -ssl_cert_file = '/controller/certificates/server.crt' -ssl_key_file = '/controller/certificates/server.key' -synchronous_standby_names = 'ANY 1 ("sandbox-1","sandbox-3")' -unix_socket_directories = '/controller/run' -wal_keep_size = '512MB' -wal_level = 'logical' -wal_log_hints = 'on' -cnpg.config_sha256 = '3cfa683e23fe513afaee7c97b50ce0628e0cc634bca8b096517538a9a4428efc' - -PostgreSQL HBA Rules - -# Grant local access -local all all peer map=local - -# Require client certificate authentication for the streaming_replica user -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert -hostssl all cnpg_pooler_pgbouncer all cert - -# Otherwise use the default authentication method -host all all all scram-sha-256 - +```output +Cluster Summary +Name: default/sandbox +System ID: 7423474350493388827 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +Primary instance: sandbox-1 +Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 2m4s) +Status: Cluster in healthy state +Instances: 3 +Ready instances: 3 +Size: 126M +Current Write LSN: 0/6053720 (Timeline: 1 - WAL File: 000000010000000000000006) Continuous Backup status -First Point of Recoverability: Not Available 
-Working WAL archiving: OK -Last Archived WAL: 00000008000003B00000001D @ 2021-12-14T10:20:42.272815Z -Last Failed WAL: - +Not configured + +Physical backups +No running physical backups found Streaming Replication status -Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority ----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- -sandbox-1 3B1/61E26448 3B1/61DF82F0 3B1/61DF82F0 3B1/61DF82F0 00:00:00.000333 00:00:00.000333 00:00:00.005484 streaming quorum 1 -sandbox-3 3B1/61E26448 3B1/61E26448 3B1/61DF82F0 3B1/61DF82F0 00:00:00.000756 00:00:00.000756 00:00:00.000756 streaming quorum 1 +Replication Slots Enabled +Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority Replication Slot Slot Restart LSN Slot WAL Status Slot Safe WAL Size +---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- ---------------- ---------------- --------------- ------------------ +sandbox-2 0/6053720 0/6053720 0/6053720 0/6053720 00:00:00 00:00:00 00:00:00 streaming async 0 active 0/6053720 reserved NULL +sandbox-3 0/6053720 0/6053720 0/6053720 0/6053720 00:00:00 00:00:00 00:00:00 streaming async 0 active 0/6053720 reserved NULL + +Unmanaged Replication Slot Status +No unmanaged replication slots found + +Managed roles status +No roles managed + +Tablespaces status +No managed tablespaces + +Pod Disruption Budgets status +Name Role Expected Pods Current Healthy Minimum Desired Healthy Disruptions Allowed +---- ---- ------------- --------------- ----------------------- ------------------- +sandbox replica 2 2 1 1 +sandbox-primary primary 1 1 1 0 Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version ----- ------------- ----------- ---------------- ------ --- --------------- -sandbox-1 3B1/610204B8 Standby (sync) OK Guaranteed 1.11.0 -sandbox-2 3B1/61DE3158 Primary OK Guaranteed 1.11.0 -sandbox-3 3B1/62618470 Standby (sync) OK Guaranteed 1.11.0 +Name Current LSN Replication role Status QoS Manager Version Node +---- ----------- ---------------- ------ --- --------------- ---- +sandbox-1 0/6053720 Primary OK BestEffort 1.27.0 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.27.0 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.27.0 k8s-eu-worker ``` +With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can +also view PostgreSQL configuration, HBA settings, and certificates. + The command also supports output in `yaml` and `json` format. ### Promote The meaning of this command is to `promote` a pod in the cluster to primary, so you -can start with maintenance work or test a switch-over situation in your cluster +can start with maintenance work or test a switch-over situation in your cluster: -```shell -kubectl cnpg promote cluster-example cluster-example-2 +```sh +kubectl cnpg promote CLUSTER CLUSTER-INSTANCE ``` -Or you can use the instance node number to promote +Or you can use the instance node number to promote: -```shell -kubectl cnpg promote cluster-example 2 +```sh +kubectl cnpg promote CLUSTER INSTANCE ``` ### Certificates @@ -410,21 +381,21 @@ Clusters created using the CloudNativePG operator work with a CA to sign a TLS authentication certificate. 
To get a certificate, you need to provide a name for the secret to store
-the credentials, the cluster name, and a user for this certificate
+the credentials, the cluster name, and a user for this certificate:

-```shell
-kubectl cnpg certificate cluster-cert --cnpg-cluster cluster-example --cnpg-user appuser
+```sh
+kubectl cnpg certificate cluster-cert --cnpg-cluster CLUSTER --cnpg-user USER
```

-After the secret it's created, you can get it using `kubectl`
+After the secret is created, you can get it using `kubectl`:

-```shell
+```sh
kubectl get secret cluster-cert
```

And the content of the same in plain text using the following commands:

-```shell
+```sh
kubectl get secret cluster-cert -o json | jq -r '.data | map(@base64d) | .[]'
```

@@ -434,19 +405,19 @@ The `kubectl cnpg restart` command can be used in two cases:

- requesting the operator to orchestrate a rollout restart
  for a certain cluster. This is useful to apply
-  configuration changes to cluster dependent objects, such as ConfigMaps
+  configuration changes to cluster-dependent objects, such as `ConfigMaps`
  containing custom monitoring queries.

- request a single instance restart, either in-place if the instance is
  the cluster's primary or deleting and recreating the pod if it is a replica.

-```shell
+```sh
# this command will restart a whole cluster in a rollout fashion
-kubectl cnpg restart [clusterName]
+kubectl cnpg restart CLUSTER

# this command will restart a single instance, according to the policy above
-kubectl cnpg restart [clusterName] [pod]
+kubectl cnpg restart CLUSTER INSTANCE
```

If the in-place restart is requested but the change cannot be applied without
@@ -465,8 +436,8 @@ to cluster dependent objects, such as ConfigMaps containing custom monitoring qu

The following command will reload all configurations for a given cluster:

-```shell
-kubectl cnpg reload [cluster_name]
+```sh
+kubectl cnpg reload CLUSTER
```

### Maintenance

@@ -490,13 +461,13 @@ all the cluster in the list.
If you want to set in maintenance all the PostgreSQL in your Kubernetes cluster,
just need to write the following command:

-```shell
+```sh
kubectl cnpg maintenance set --all-namespaces
```

And you'll have the list of all the cluster to update

-```shell
+```output
The following are the new values for the clusters
Namespace  Cluster Name  Maintenance  reusePVC
---------  ------------  -----------  --------

@@ -548,32 +519,32 @@ default time-stamped filename is created for the zip file.
namespace as the clusters. E.g.
the default installation namespace is cnpg-system -```shell -kubectl cnpg report operator -n +```sh +kubectl cnpg report operator -n cnpg-system ``` results in -```shell +```output Successfully written report to "report_operator_.zip" (format: "yaml") ``` With the `-f` flag set: -```shell -kubectl cnpg report operator -n -f reportRedacted.zip +```sh +kubectl cnpg report operator -n cnpg-system -f reportRedacted.zip ``` Unzipping the file will produce a time-stamped top-level folder to keep the directory tidy: -```shell +```sh unzip reportRedacted.zip ``` will result in: -```shell +```output Archive: reportRedacted.zip creating: report_operator_/ creating: report_operator_/manifests/ @@ -583,13 +554,13 @@ Archive: reportRedacted.zip inflating: report_operator_/manifests/validating-webhook-configuration.yaml inflating: report_operator_/manifests/mutating-webhook-configuration.yaml inflating: report_operator_/manifests/webhook-service.yaml - inflating: report_operator_/manifests/cnpg-ca-secret.yaml - inflating: report_operator_/manifests/cnpg-webhook-cert.yaml + inflating: report_operator_/manifests/cnpg-ca-secret(secret).yaml + inflating: report_operator_/manifests/cnpg-webhook-cert(secret).yaml ``` If you activated the `--logs` option, you'd see an extra subdirectory: -```shell +```output Archive: report_operator_.zip creating: report_operator_/operator-logs/ @@ -602,14 +573,14 @@ Archive: report_operator_.zip In all cases, it will also try to get the CURRENT operator logs. If current and previous logs are available, it will show them both. -``` json -====== Begin of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +```output +====== Beginning of Previous Log ===== +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.27.0","build":{"Version":"1.27.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.27.0","build":{"Version":"1.27.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` @@ -618,9 +589,9 @@ and `====== End …` guards, with no content inside. 
You can verify that the confidential information is REDACTED by default: -```shell +```sh cd report_operator_/manifests/ -head cnpg-ca-secret.yaml +head cnpg-ca-secret\(secret\).yaml ``` ```yaml @@ -637,20 +608,20 @@ metadata: With the `-S` (`--stopRedaction`) option activated, secrets are shown: -```shell -kubectl cnpg report operator -n -f reportNonRedacted.zip -S +```sh +kubectl cnpg report operator -n cnpg-system -f reportNonRedacted.zip -S ``` You'll get a reminder that you're about to view confidential information: -```shell +```output WARNING: secret Redaction is OFF. Use it with caution Successfully written report to "reportNonRedacted.zip" (format: "yaml") ``` -```shell +```sh unzip reportNonRedacted.zip -head cnpg-ca-secret.yaml +head cnpg-ca-secret\(secret\).yaml ``` ```yaml @@ -686,25 +657,25 @@ so the `-S` is disabled. Usage: -```shell -kubectl cnpg report cluster [flags] +```sh +kubectl cnpg report cluster CLUSTER [flags] ``` Note that, unlike the `operator` sub-command, for the `cluster` sub-command you need to provide the cluster name, and very likely the namespace, unless the cluster is in the default one. -```shell -kubectl cnpg report cluster example -f report.zip -n example_namespace +```sh +kubectl cnpg report cluster CLUSTER -f report.zip [-n NAMESPACE] ``` and then: -```shell +```sh unzip report.zip ``` -```shell +```output Archive: report.zip creating: report_cluster_example_/ creating: report_cluster_example_/manifests/ @@ -716,21 +687,21 @@ Archive: report.zip Remember that you can use the `--logs` flag to add the pod and job logs to the ZIP. -```shell -kubectl cnpg report cluster example -n example_namespace --logs +```sh +kubectl cnpg report cluster CLUSTER [-n NAMESPACE] --logs ``` will result in: -```shell +```output Successfully written report to "report_cluster_example_.zip" (format: "yaml") ``` -```shell +```sh unzip report_cluster_.zip ``` -```shell +```output Archive: report_cluster_example_.zip creating: report_cluster_example_/ creating: report_cluster_example_/manifests/ @@ -765,7 +736,7 @@ the `-h` flag: `kubectl cnpg logs cluster -h` The `logs` command will display logs in JSON-lines format, unless the -`--timestamps` flag is used, in which case, a human readable timestamp will be +`--timestamps` flag is used, in which case, a human-readable timestamp will be prepended to each line. In this case, lines will no longer be valid JSON, and tools such as `jq` may not work as desired. @@ -788,7 +759,7 @@ The `--tail` flag can be used to specify how many log lines will be retrieved from each pod in the cluster. By default, the `logs cluster` sub-command will display all the logs from each pod in the cluster. If combined with the "follow" flag `-f`, the number of logs specified by `--tail` will be retrieved until the -current time, and and from then the new logs will be followed. +current time, and from then the new logs will be followed. NOTE: unlike other `cnpg` plugin commands, the `-f` is used to denote "follow" rather than specify a file. This keeps with the convention of `kubectl logs`, @@ -796,24 +767,24 @@ which takes `-f` to mean the logs should be followed. 
Usage:

-```shell
-kubectl cnpg logs cluster [flags]
+```sh
+kubectl cnpg logs cluster CLUSTER [flags]
```

Using the `-f` option to follow:

-```shell
-kubectl cnpg report cluster cluster-example -f
+```sh
+kubectl cnpg logs cluster CLUSTER -f
```

Using `--tail` option to display 3 lines from each pod and the `-f` option to
follow:

-```shell
-kubectl cnpg report cluster cluster-example -f --tail 3
+```sh
+kubectl cnpg logs cluster CLUSTER -f --tail 3
```

-``` json
+```output
{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] LOG: ending log output to stderr","source":"/controller/log/postgres","logging_pod":"cluster-example-3"}
{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] HINT: Future log output will go to log destination \"csvlog\".","source":"/controller/log/postgres","logging_pod":"cluster-example-3"}
…

With the `-o` option omitted, and with `--output` specified:

-``` sh
-kubectl cnpg logs cluster cluster-example --output my-cluster.log
+```console
+$ kubectl cnpg logs cluster CLUSTER --output my-cluster.log

Successfully written logs to "my-cluster.log"
```

+#### Pretty
+
+The `pretty` sub-command reads a log stream from standard input, formats it
+into a human-readable output, and attempts to sort the entries by timestamp.
+
+It can be used in combination with `kubectl cnpg logs cluster`, as
+shown in the following example:
+
+```console
+$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty
+2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting CloudNativePG Instance Manager
+2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL
+2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting tablespace manager
+2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting external server manager
+[...]
+```
+
+Alternatively, it can be used in combination with other commands that produce
+CNPG logs in JSON format, such as `stern`, or `kubectl logs`, as in the
+following example:
+
+```console
+$ kubectl logs cluster-example-1 | kubectl cnpg logs pretty
+2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting CloudNativePG Instance Manager
+2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL
+2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting tablespace manager
+2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting external server manager
+[...]
+```
+
+The `pretty` sub-command also supports advanced log filtering, allowing users
+to display logs for specific pods or loggers, or to filter logs by severity
+level.
+Here's an example:
+
+```console
+$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty --pods cluster-example-1 --loggers postgres --log-level info
+2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: redirecting log output to logging collector process
+2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] HINT: Future log output will appear in directory "/controller/log"...
+2024-10-15T17:35:00.510 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: ending log output to stderr +2024-10-15T17:35:00.510 INFO cluster-example-1 postgres ending log output to stderr +[...] +``` + +The `pretty` sub-command will try to sort the log stream, +to make logs easier to reason about. In order to achieve this, it gathers the +logs into groups, and within groups it sorts by timestamp. This is the only +way to sort interactively, as `pretty` may be piped from a command in "follow" +mode. The sub-command will add a group separator line, `---`, at the end of +each sorted group. The size of the grouping can be configured via the +`--sorting-group-size` flag (default: 1000), as illustrated in the following example: + +```console +$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty --sorting-group-size=3 +2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Starting CloudNativePG Instance Manager +2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Checking for free disk space for WALs before starting PostgreSQL +2024-10-15T17:35:20.438 INFO cluster-example-2 instance-manager starting tablespace manager +--- +2024-10-15T17:35:20.438 INFO cluster-example-2 instance-manager starting external server manager +2024-10-15T17:35:20.438 INFO cluster-example-2 instance-manager starting controller-runtime manager +2024-10-15T17:35:20.439 INFO cluster-example-2 instance-manager Starting EventSource +--- +[...] +``` + +To explore all available options, use the `-h` flag for detailed explanations +of the supported flags and their usage. + +!!! Info + You can also increase the verbosity of the log by adding more `-v` options. + ### Destroy The `kubectl cnpg destroy` command helps remove an instance and all the @@ -843,80 +885,66 @@ detached PVCs. Usage: -``` -kubectl cnpg destroy [CLUSTER_NAME] [INSTANCE_ID] +```sh +kubectl cnpg destroy CLUSTER INSTANCE ``` The following example removes the `cluster-example-2` pod and the associated PVCs: -``` +```sh kubectl cnpg destroy cluster-example 2 ``` -### Cluster hibernation +### Cluster Hibernation -Sometimes you may want to suspend the execution of a CloudNativePG `Cluster` -while retaining its data, then resume its activity at a later time. We've -called this feature **cluster hibernation**. +There are times when you may need to temporarily suspend a CloudNativePG +`Cluster` while preserving its data, allowing you to resume operations later. +This feature is known as **cluster hibernation**. -Hibernation is only available via the `kubectl cnpg hibernate [on|off]` -commands. +Hibernation is managed declaratively using the `cnpg.io/hibernation` +annotation. -Hibernating a CloudNativePG cluster means destroying all the resources -generated by the cluster, except the PVCs that belong to the PostgreSQL primary -instance. - -You can hibernate a cluster with: - -``` -kubectl cnpg hibernate on -``` +!!! Info + For more details, see the ["Declarative Hibernation"](declarative_hibernation.md) + documentation page. -This will: +To simplify the process, the `cnpg` plugin for `kubectl` provides a `hibernate` +command, which acts as a convenient shortcut for applying the annotation. -1. shutdown every PostgreSQL instance -2. detach the PVCs containing the data of the primary instance, and annotate - them with the latest database status and the latest cluster configuration -3. 
-   the aforementioned PVCs
+To hibernate a cluster, run:

-When hibernated, a CloudNativePG cluster is represented by just a group of
-PVCs, in which the one containing the `PGDATA` is annotated with the latest
-available status, including content from `pg_controldata`.
+```sh
+kubectl cnpg hibernate on CLUSTER
+```

-!!! Warning
-    A cluster having fenced instances cannot be hibernated, as fencing is
-    part of the hibernation procedure too.
+This command applies the `cnpg.io/hibernation=on` annotation to the cluster,
+suspending its execution.

-In case of error the operator will not be able to revert the procedure. You can
-still force the operation with:
+To resume a hibernated cluster, use:

+```sh
+kubectl cnpg hibernate off CLUSTER
 ```
-kubectl cnpg hibernate on cluster-example --force
-```
-
-A hibernated cluster can be resumed with:

-```
-kubectl cnpg hibernate off
-```
+This will remove the hibernation state by setting `cnpg.io/hibernation=off`.

-Once the cluster has been hibernated, it's possible to show the last
-configuration and the status that PostgreSQL had after it was shut down.
-That can be done with:
+You can check the cluster’s status at any time with:

-```
-kubectl cnpg hibernate status
+```sh
+kubectl cnpg status CLUSTER
 ```

+This will display the current state of the cluster, including whether it is
+hibernated.
+
 ### Benchmarking the database with pgbench

 Pgbench can be run against an existing PostgreSQL cluster with following command:

-```
-kubectl cnpg pgbench -- --time 30 --client 1 --jobs 1
+```sh
+kubectl cnpg pgbench CLUSTER -- --time 30 --client 1 --jobs 1
 ```

 Refer to the [Benchmarking pgbench section](benchmarking.md#pgbench) for more
@@ -924,10 +952,10 @@ details.

 ### Benchmarking the storage with fio

-fio can be run on an existing storage class with following command:
+`fio` can be run on an existing storage class with the following command:

-```
-kubectl cnpg fio -n
+```sh
+kubectl cnpg fio FIO_JOB_NAME [-n NAMESPACE]
 ```

 Refer to the [Benchmarking fio section](benchmarking.md#fio) for more details.
@@ -939,20 +967,20 @@ an existing Postgres cluster by creating a new `Backup` resource.

 The following example requests an on-demand backup for a given cluster:

-```shell
-kubectl cnpg backup [cluster_name]
+```sh
+kubectl cnpg backup CLUSTER
 ```

 or, if using volume snapshots:

-```shell
-kubectl cnpg backup [cluster_name] -m volumeSnapshot
+```sh
+kubectl cnpg backup CLUSTER -m volumeSnapshot
 ```

 The created backup will be named after the request time:

-```shell
-kubectl cnpg backup cluster-example
+```console
+$ kubectl cnpg backup cluster-example
 backup/cluster-example-20230121002300 created
 ```

@@ -970,7 +998,7 @@ the configuration settings.

 ### Launching psql

-The `kubectl cnpg psql` command starts a new PostgreSQL interactive front-end
+The `kubectl cnpg psql CLUSTER` command starts a new PostgreSQL interactive front-end
 process (psql) connected to an existing Postgres cluster, as if you were running
 it from the actual pod. This means that you will be using the `postgres` user.

@@ -978,10 +1006,10 @@ it from the actual pod. This means that you will be using the `postgres` user.

 As you will be connecting as `postgres` user, in production environments this
 method should be used with extreme care, by authorized personnel only.
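+
+As a quick, non-authoritative sketch (assuming the default `app` database
+exists), any options placed after `--` are passed through to `psql`, which can
+be useful for one-off, read-only checks:
+
+```sh
+# hypothetical example: run a single query and exit
+kubectl cnpg psql cluster-example -- -d app -c 'SELECT version()'
+```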
-```shell
-kubectl cnpg psql cluster-example
+```console
+$ kubectl cnpg psql cluster-example

-psql (16.4 (Debian 16.4-1.pgdg110+1))
+psql (17.5 (Debian 17.5-1.pgdg110+1))
 Type "help" for help.

 postgres=#
@@ -990,9 +1018,10 @@ postgres=#
 By default, the command will connect to the primary instance. The user can
 select to work against a replica by using the `--replica` option:

-```shell
-kubectl cnpg psql --replica cluster-example
-psql (16.4 (Debian 16.4-1.pgdg110+1))
+```console
+$ kubectl cnpg psql --replica cluster-example
+
+psql (17.5 (Debian 17.5-1.pgdg110+1))

 Type "help" for help.

@@ -1110,20 +1139,20 @@ command. The basic structure of this command is as follows:

 ```sh
 kubectl cnpg publication create \
-  --publication \
-  [--external-cluster ]
-  [options]
+  --publication PUBLICATION_NAME \
+  [--external-cluster EXTERNAL_CLUSTER]
+  LOCAL_CLUSTER [options]
 ```

 There are two primary use cases:

 - With `--external-cluster`: Use this option to create a publication on an
   external cluster (i.e. defined in the `externalClusters` stanza). The commands
-  will be issued from the ``, but the publication will be for the
-  data in ``.
+  will be issued from the `LOCAL_CLUSTER`, but the publication will be for the
+  data in `EXTERNAL_CLUSTER`.

 - Without `--external-cluster`: Use this option to create a publication in the
-  `` PostgreSQL `Cluster` (by default, the `app` database).
+  `LOCAL_CLUSTER` PostgreSQL `Cluster` (by default, the `app` database).

 !!! Warning
     When connecting to an external cluster, ensure that the specified user has
@@ -1157,7 +1186,7 @@ to `source-cluster`.

 We can run:

-``` sh
+```sh
 kubectl cnpg publication create destination-cluster \
   --external-cluster=source-cluster --all-tables
 ```

@@ -1167,7 +1196,7 @@ the SQL commands on the `destination-cluster`.

 Or instead, we can run:

-``` sh
+```sh
 kubectl cnpg publication create source-cluster \
   --publication=app --all-tables
 ```

@@ -1189,9 +1218,9 @@ following command structure:

 ```sh
 kubectl cnpg publication drop \
-  --publication \
-  [--external-cluster ]
-  [options]
+  --publication PUBLICATION_NAME \
+  [--external-cluster EXTERNAL_CLUSTER]
+  LOCAL_CLUSTER [options]
 ```

 To access further details and precise instructions, use the following command:
@@ -1227,15 +1256,15 @@ command. The basic structure of this command is as follows:

 ```sh
 kubectl cnpg subscription create \
-  --subscription \
-  --publication \
-  --external-cluster \
-  [options]
+  --subscription SUBSCRIPTION_NAME \
+  --publication PUBLICATION_NAME \
+  --external-cluster EXTERNAL_CLUSTER \
+  LOCAL_CLUSTER [options]
 ```

 This command configures a subscription directed towards the specified
 publication in the designated external cluster, as defined in the
-`externalClusters` stanza of the ``.
+`externalClusters` stanza of the `LOCAL_CLUSTER`.
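+
+As a minimal sketch (the cluster names and connection parameters below are
+illustrative assumptions), such an `externalClusters` entry in the local
+`Cluster` manifest might look like this:
+
+```yaml
+externalClusters:
+  - name: source-cluster
+    connectionParameters:
+      host: source-cluster-rw.default.svc
+      user: app
+      dbname: app
+    password:
+      name: source-cluster-app
+      key: password
+```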
 For additional information and detailed instructions, type the following
 command:
@@ -1252,7 +1281,7 @@ As in the section on publications, we have a `source-cluster` and a

 The following command:

-``` sh
+```sh
 kubectl cnpg subscription create destination-cluster \
   --external-cluster=source-cluster \
   --publication=app --subscription=app
@@ -1277,8 +1306,8 @@ You can drop a `SUBSCRIPTION` with the following command structure:

 ```sh
-kubectl cnpg subcription drop \
-  --subscription \
-  [options]
+kubectl cnpg subscription drop \
+  --subscription SUBSCRIPTION_NAME \
+  LOCAL_CLUSTER [options]
 ```

 To access further details and precise instructions, use the following command:
@@ -1306,8 +1335,8 @@ You can use the command as shown below:

 ```sh
 kubectl cnpg subscription sync-sequences \
-  --subscription \
-
+  --subscription SUBSCRIPTION_NAME \
+  LOCAL_CLUSTER
 ```

 For comprehensive details and specific instructions, utilize the following
@@ -1326,7 +1355,7 @@ subscription, both called `app`, are already present.

 The following command will synchronize the sequences involved in the
 `app` subscription, from the source cluster into the destination cluster.

-``` sh
+```sh
 kubectl cnpg subscription sync-sequences destination-cluster \
   --subscription=app
 ```

@@ -1342,3 +1371,118 @@ The `cnpg` plugin can be easily integrated in [K9s](https://k9scli.io/), a
 popular terminal-based UI to interact with Kubernetes clusters.

 See [`k9s/plugins.yml`](samples/k9s/plugins.yml) for details.
+
+## Permissions required by the plugin
+
+The plugin requires a set of Kubernetes permissions that depends on the command
+to execute. These permissions may affect resources and sub-resources like Pods,
+PDBs, PVCs, and enable actions like `get`, `delete`, `patch`. The following
+table contains the full details:
+
+| Command         | Resource Permissions |
+|:----------------|:---------------------|
+| backup | clusters: get<br/>backups: create |
+| certificate | clusters: get<br/>secrets: get,create |
+| destroy | pods: get,delete<br/>jobs: delete,list<br/>PVCs: list,delete,update |
+| fencing | clusters: get,patch<br/>pods: get |
+| fio | PVCs: create<br/>configmaps: create<br/>deployment: create |
+| hibernate | clusters: get,patch,delete<br/>pods: list,get,delete<br/>pods/exec: create<br/>jobs: list<br/>PVCs: get,list,update,patch,delete |
+| install | none |
+| logs | clusters: get<br/>pods: list<br/>pods/log: get |
+| maintenance | clusters: get,patch,list |
+| pgadmin4 | clusters: get<br/>configmaps: create<br/>deployments: create<br/>services: create<br/>secrets: create |
+| pgbench | clusters: get<br/>jobs: create |
+| promote | clusters: get<br/>clusters/status: patch<br/>pods: get |
+| psql | pods: get,list<br/>pods/exec: create |
+| publication | clusters: get<br/>pods: get,list<br/>pods/exec: create |
+| reload | clusters: get,patch |
+| report cluster | clusters: get<br/>pods: list<br/>pods/log: get<br/>jobs: list<br/>events: list<br/>PVCs: list |
+| report operator | configmaps: get<br/>deployments: get<br/>events: list<br/>pods: list<br/>pods/log: get<br/>secrets: get<br/>services: get<br/>mutatingwebhookconfigurations: list[^1]<br/>validatingwebhookconfigurations: list[^1]<br/>If OLM is present on the K8s cluster, also:<br/>clusterserviceversions: list<br/>installplans: list<br/>subscriptions: list |
+| restart | clusters: get,patch<br/>pods: get,delete |
+| status | clusters: get<br/>pods: list<br/>pods/exec: create<br/>pods/proxy: create<br/>PDBs: list |
+| subscription | clusters: get<br/>pods: get,list<br/>pods/exec: create |
+| version | none |
+
+[^1]: These permissions are cluster scoped and require a `ClusterRole` resource.
+
+///Footnotes Go Here///
+
+Additionally, assigning the `list` permission on `clusters` enables
+autocompletion for multiple commands.
+
+### Role examples
+
+It is possible to create roles with restricted permissions.
+The following example creates a role that only has access to the cluster logs:
+
+```yaml
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: cnpg-log
+rules:
+  - verbs:
+      - get
+    apiGroups:
+      - postgresql.cnpg.io
+    resources:
+      - clusters
+  - verbs:
+      - list
+    apiGroups:
+      - ''
+    resources:
+      - pods
+  - verbs:
+      - get
+    apiGroups:
+      - ''
+    resources:
+      - pods/log
+```
+
+The next example shows a role with the minimal permissions required to get
+the cluster status using the plugin's `status` command:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: cnpg-status
+rules:
+  - verbs:
+      - get
+    apiGroups:
+      - postgresql.cnpg.io
+    resources:
+      - clusters
+  - verbs:
+      - list
+    apiGroups:
+      - ''
+    resources:
+      - pods
+  - verbs:
+      - create
+    apiGroups:
+      - ''
+    resources:
+      - pods/exec
+  - verbs:
+      - create
+    apiGroups:
+      - ''
+    resources:
+      - pods/proxy
+  - verbs:
+      - list
+    apiGroups:
+      - policy
+    resources:
+      - poddisruptionbudgets
+```
+
+!!! Important
+    Keeping the verbs restricted per `resources` and per `apiGroups` helps to
+    prevent inadvertently granting more than intended permissions.
diff --git a/docs/src/kubernetes_upgrade.md b/docs/src/kubernetes_upgrade.md
index e14d5b271a..5cab4f8f84 100644
--- a/docs/src/kubernetes_upgrade.md
+++ b/docs/src/kubernetes_upgrade.md
@@ -1,4 +1,5 @@
 # Kubernetes Upgrade and Maintenance
+
 Maintaining an up-to-date Kubernetes cluster is crucial for ensuring optimal
 performance and security, particularly for self-managed clusters, especially
diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md
index 74b9d247ae..290a116bcd 100644
--- a/docs/src/labels_annotations.md
+++ b/docs/src/labels_annotations.md
@@ -1,4 +1,5 @@
 # Labels and annotations
+
 Resources in Kubernetes are organized in a flat structure, with no
 hierarchical information or relationship between them. However, such resources and objects
@@ -28,62 +29,73 @@ they're inherited by all resources created by it (including pods).

 ## Predefined labels

-These predefined labels are managed by CloudNativePG.
+CloudNativePG manages the following predefined labels:

 `cnpg.io/backupDate`
-: The date of the backup in ISO 8601 format (`YYYYMMDD`)
+: The date of the backup in ISO 8601 format (`YYYYMMDD`).
+  This label is available only on `VolumeSnapshot` resources.

 `cnpg.io/backupName`
-: Backup identifier, available only on `Backup` and `VolumeSnapshot`
-  resources
+: Backup identifier.
+  This label is available only on `VolumeSnapshot` resources.

 `cnpg.io/backupMonth`
-: The year/month when a backup was taken
+: The year/month when a backup was taken.
+  This label is available only on `VolumeSnapshot` resources.

 `cnpg.io/backupTimeline`
-: The timeline of the instance when a backup was taken
+: The timeline of the instance when a backup was taken.
+  This label is available only on `VolumeSnapshot` resources.

 `cnpg.io/backupYear`
-: The year a backup was taken
+: The year a backup was taken.
+  This label is available only on `VolumeSnapshot` resources.

 `cnpg.io/cluster`
-: Name of the cluster
+: Name of the cluster.
 `cnpg.io/immediateBackup`
 : Applied to a `Backup` resource if the backup is the first one created from
-  a `ScheduledBackup` object having `immediate` set to `true`
+  a `ScheduledBackup` object having `immediate` set to `true`.

 `cnpg.io/instanceName`
 : Name of the PostgreSQL instance (replaces the old and
-  deprecated `postgresql` label)
+  deprecated `postgresql` label).

 `cnpg.io/jobRole`
 : Role of the job (that is, `import`, `initdb`, `join`, ...)

 `cnpg.io/onlineBackup`
-: Whether the backup is online (hot) or taken when Postgres is down (cold)
+: Whether the backup is online (hot) or taken when Postgres is down (cold).
+  This label is available only on `VolumeSnapshot` resources.

 `cnpg.io/podRole`
 : Distinguishes pods dedicated to pooler deployment from those used for
-  database instances
+  database instances.

 `cnpg.io/poolerName`
-: Name of the PgBouncer pooler
+: Name of the PgBouncer pooler.

 `cnpg.io/pvcRole`
-: Purpose of the PVC, such as `PG_DATA` or `PG_WAL`
+: Purpose of the PVC, such as `PG_DATA` or `PG_WAL`.

 `cnpg.io/reload`
 : Available on `ConfigMap` and `Secret` resources. When set to `true`,
   a change in the resource is automatically reloaded by the operator.

+`cnpg.io/userType`
+: Specifies the type of PostgreSQL user associated with the
+  `Secret`, either `superuser` (Postgres superuser access) or `app`
+  (application-level user in CloudNativePG terminology), and is limited to the
+  default users created by CloudNativePG (typically `postgres` and `app`).
+
 `role` - **deprecated**
 : Whether the instance running in a pod is a `primary` or a `replica`.
   This label is deprecated, you should use `cnpg.io/instanceRole` instead.

 `cnpg.io/scheduled-backup`
 : When available, name of the `ScheduledBackup` resource that created a given
-  `Backup` object
+  `Backup` object.

 `cnpg.io/instanceRole`
 : Whether the instance running in a pod is a `primary` or a `replica`.

@@ -91,7 +103,7 @@ These predefined labels are managed by CloudNativePG.

 ## Predefined annotations

-These predefined annotations are managed by CloudNativePG.
+CloudNativePG manages the following predefined annotations:

 `container.apparmor.security.beta.kubernetes.io/*`
 : Name of the AppArmor profile to apply to the named container.
@@ -100,15 +112,18 @@ These predefined annotations are managed by CloudNativePG.

 `cnpg.io/backupEndTime`
 : The time a backup ended.
+  This annotation is available only on `VolumeSnapshot` resources.

 `cnpg.io/backupEndWAL`
 : The WAL at the conclusion of a backup.
+  This annotation is available only on `VolumeSnapshot` resources.

 `cnpg.io/backupStartTime`
 : The time a backup started.

 `cnpg.io/backupStartWAL`
 : The WAL at the start of a backup.
+  This annotation is available only on `VolumeSnapshot` resources.

 `cnpg.io/coredumpFilter`
 : Filter to control the coredump of Postgres processes, expressed with a
@@ -154,6 +169,19 @@ These predefined annotations are managed by CloudNativePG.

 `cnpg.io/podEnvHash`
 : Deprecated, as the `cnpg.io/podSpec` annotation now also contains the pod environment.

+`cnpg.io/podPatch`
+: This annotation can be applied to a `Cluster` resource.
+
+  When set to a JSON-patch formatted patch, the patch is applied to the
+  instance Pods.
+
+  **⚠️ WARNING:** This feature may introduce discrepancies between the
+  operator’s expectations and Kubernetes behavior. Use with caution and only as a
+  last resort.
+
+  **IMPORTANT**: adding or changing this annotation won't trigger a rolling deployment
+  of the generated Pods. The latter can be triggered manually by the user with
+  `kubectl cnpg restart`.
+
 `cnpg.io/podSpec`
 : Snapshot of the `spec` of the pod generated by the operator. This annotation replaces
   the old, deprecated `cnpg.io/podEnvHash` annotation.
@@ -165,17 +193,17 @@ These predefined annotations are managed by CloudNativePG.
 : Current status of the PVC: `initializing`, `ready`, or `detached`.

 `cnpg.io/reconcilePodSpec`
-: Annotation can be applied to a `Cluster` or `Pooler` to prevent restarts.
+: Annotation can be applied to a `Cluster` or `Pooler` to prevent restarts.

-  When set to `disabled` on a `Cluster`, the operator prevents instances
-  from restarting due to changes in the PodSpec. This includes changes to:
+  When set to `disabled` on a `Cluster`, the operator prevents instances
+  from restarting due to changes in the PodSpec. This includes changes to:

-  - Topology or affinity
-  - Scheduler
-  - Volumes or containers
+  - Topology or affinity
+  - Scheduler
+  - Volumes or containers

-  When set to `disabled` on a `Pooler`, the operator restricts any modifications
-  to the deployment specification, except for changes to `spec.instances`.
+  When set to `disabled` on a `Pooler`, the operator restricts any modifications
+  to the deployment specification, except for changes to `spec.instances`.

 `cnpg.io/reconciliationLoop`
 : When set to `disabled` on a `Cluster`, the operator prevents the
@@ -185,12 +213,12 @@ These predefined annotations are managed by CloudNativePG.
 : Contains the latest cluster `reload` time. `reload` is triggered by the user through a plugin.

 `cnpg.io/skipEmptyWalArchiveCheck`
-: When set to `true` on a `Cluster` resource, the operator disables the check
+: When set to `enabled` on a `Cluster` resource, the operator disables the check
   that ensures that the WAL archive is empty before writing data. Use at your own
   risk.

 `cnpg.io/skipWalArchiving`
-: When set to `true` on a `Cluster` resource, the operator disables WAL archiving.
+: When set to `enabled` on a `Cluster` resource, the operator disables WAL archiving.
   This will set `archive_mode` to `off` and require a restart of all PostgreSQL
   instances. Use at your own risk.

 `cnpg.io/snapshotEndTime`
 : The time a snapshot was marked as ready to use.

+`cnpg.io/validation`
+: When set to `disabled` on a CloudNativePG-managed custom resource, the
+  validation webhook allows all changes without restriction.
+
+  **⚠️ WARNING:** Disabling validation may permit unsafe or destructive
+  operations. Use this setting with caution and at your own risk.
+
+`cnpg.io/volumeSnapshotDeadline`
+: Applied to `Backup` and `ScheduledBackup` resources, this annotation controls
+  how long the operator should retry recoverable errors before considering the
+  volume snapshot backup failed. The value is expressed in minutes and defaults
+  to 10.
+
 `kubectl.kubernetes.io/restartedAt`
 : When available, the time of last requested restart of a Postgres cluster.
diff --git a/docs/src/logging.md b/docs/src/logging.md
index f5cfd84bda..a543a5fd25 100644
--- a/docs/src/logging.md
+++ b/docs/src/logging.md
@@ -1,43 +1,61 @@
 # Logging
+
-The operator is designed to log in JSON format directly to standard output,
-including PostgreSQL logs.
+CloudNativePG outputs logs in JSON format directly to standard output, including
+PostgreSQL logs, without persisting them to storage for security reasons.
+This design facilitates seamless integration with most Kubernetes-compatible log
+management tools, including command line ones like
+[stern](https://github.com/stern/stern).

-Each log entry has the following fields:
-
-- `level` – Log level (`info`, `notice`, ...).
-- `ts` – The timestamp (epoch with microseconds).
-- `logger` – The type of the record (for example, `postgres` or `pg_controldata`).
-- `msg` – The actual message or the keyword `record` in case the message is parsed in JSON format.
-- `record` – The actual record with structure that varies depending on the
-  `logger` type.
-- `logging_podName` – The pod where the log was created.
-
-!!! Warning
-    Long-term storage and management of logs is outside the operator's purview,
-    and needs to be provided at the level of the Kubernetes installation.
-    See the
+!!! Important
+    Long-term storage and management of logs are outside the scope of the
+    operator and should be handled at the Kubernetes infrastructure level.
+    For more information, see the
     [Kubernetes Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/)
     documentation.

+Each log entry includes the following fields:
+
+- `level` – The log level (e.g., `info`, `notice`).
+- `ts` – The timestamp.
+- `logger` – The type of log (e.g., `postgres`, `pg_controldata`).
+- `msg` – The log message, or the keyword `record` if the message is in JSON
+  format.
+- `record` – The actual record, with a structure that varies depending on the
+  `logger` type.
+- `logging_pod` – The name of the pod where the log was generated.
+
 !!! Info
-    If your log ingestion system requires it, you can rename the `level` and `ts` field names using the `log-field-level` and
-    `log-field-timestamp` flags of the operator controller. Edit the `Deployment` definition of the
-    `cloudnative-pg` operator.
+    If your log ingestion system requires custom field names, you can rename
+    the `level` and `ts` fields using the `log-field-level` and
+    `log-field-timestamp` flags in the operator controller. This can be configured
+    by editing the `Deployment` definition of the `cloudnative-pg` operator.
+
+## Cluster Logs
+
+You can configure the log level for the instance pods in the cluster
+specification using the `logLevel` option. Available log levels are: `error`,
+`warning`, `info` (default), `debug`, and `trace`.

-## Operator log
+!!! Important
+    Currently, the log level can only be set at the time the instance starts.
+    Changes to the log level in the cluster specification after the cluster has
+    started will only apply to new pods, not existing ones.
+
+## Operator Logs

-You can specify a log level in the cluster spec with the option `logLevel`.
-You can set it to `error`, `warning`, `info`(default), `debug`, or `trace`.
+The logs produced by the operator pod support the same log levels as the
+instance pods: `error`, `warning`, `info` (default), `debug`, and `trace`.

-Currently, you can set the log level only when an instance starts. You can't
-change it at runtime. If you change the value in the cluster spec after the cluster
-was started, it takes effect only in the new pods and not the old ones.
+The log level for the operator can be configured by editing the `Deployment`
+definition of the operator and setting the `--log-level` command line argument
+to the desired value.
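+
+As a hedged sketch (assuming the default `cnpg-system` namespace and
+`cnpg-controller-manager` deployment name), the argument can be added with a
+JSON patch:
+
+```sh
+# append --log-level=debug to the operator container arguments
+kubectl -n cnpg-system patch deployment cnpg-controller-manager --type=json \
+  -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--log-level=debug"}]'
+```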
-## PostgreSQL log
+## PostgreSQL Logs

-Each entry in the PostgreSQL log is a JSON object having the `logger` key set
-to `postgres` and the structure described in the following example:
+Each PostgreSQL log entry is a JSON object with the `logger` key set to
+`postgres`. The structure of the log entries is as follows:

 ```json
 {
@@ -75,35 +93,33 @@ to `postgres` and the structure described in the following example:
 }
 ```

-Internally, the operator relies on the PostgreSQL CSV log format. See
-the PostgreSQL documentation for more information about the [CSV log
-format](https://www.postgresql.org/docs/current/runtime-config-logging.html).
+!!! Info
+    Internally, the operator uses PostgreSQL's CSV log format. For more details,
+    refer to the [PostgreSQL documentation on CSV log format](https://www.postgresql.org/docs/current/runtime-config-logging.html).

-## PGAudit logs
+## PGAudit Logs

-CloudNativePG has transparent and native support for
+CloudNativePG offers seamless and native support for
 [PGAudit](https://www.pgaudit.org/) on PostgreSQL clusters.

-To enable this support, add the required `pgaudit` parameters to the `postgresql`
-section in the configuration of the cluster.
+To enable PGAudit, add the necessary `pgaudit` parameters in the `postgresql`
+section of the cluster configuration.

 !!! Important
-    You need to add the PGAudit library to `shared_preload_libraries`.
-    CloudNativePG adds the library based on the
-    presence of `pgaudit.*` parameters in the postgresql configuration.
-    The operator detects and manages the addition and removal of the
-    library from `shared_preload_libraries`.
+    The PGAudit library must be added to `shared_preload_libraries`.
+    CloudNativePG automatically manages this based on the presence of `pgaudit.*`
+    parameters in the PostgreSQL configuration. The operator handles both the
+    addition and removal of the library from `shared_preload_libraries`.

-The operator also takes care of creating and removing the extension from all
-the available databases in the cluster.
+Additionally, the operator manages the creation and removal of the PGAudit
+extension across all databases within the cluster.

 !!! Important
-    CloudNativePG runs the `CREATE EXTENSION` and
-    `DROP EXTENSION` commands in all databases in the cluster that accept
-    connections.
+    CloudNativePG executes the `CREATE EXTENSION` and `DROP EXTENSION` commands
+    in all databases within the cluster that accept connections.

-This example shows a PostgreSQL 13 `Cluster` deployment that results in
-`pgaudit` being enabled with the requested configuration:
+The following example demonstrates a PostgreSQL `Cluster` deployment with
+PGAudit enabled and configured:

 ```yaml
 apiVersion: postgresql.cnpg.io/v1
@@ -112,7 +128,6 @@ metadata:
   name: cluster-example
 spec:
   instances: 3
-  imageName: ghcr.io/cloudnative-pg/postgresql:13

   postgresql:
     parameters:
@@ -125,14 +140,15 @@ spec:
     size: 1Gi
 ```

-The audit CSV logs entries returned by PGAudit are then parsed and routed to
-stdout in JSON format, similarly to all the remaining logs:
+The audit CSV log entries generated by PGAudit are parsed and routed to
+standard output in JSON format, similar to all other logs:

 - `.logger` is set to `pgaudit`.
 - `.msg` is set to `record`.
-- `.record` contains the whole parsed record as a JSON object. This is similar to
-  `logging_collector` logs, except for `.record.audit`, which contains the
-  PGAudit CSV message formatted as a JSON object.
+- `.record` contains the entire parsed record as a JSON object. This structure
+  resembles that of `logging_collector` logs, with the exception of
+  `.record.audit`, which contains the PGAudit CSV message formatted as a JSON
+  object.

 This example shows sample log entries:

@@ -175,24 +191,26 @@ See the
 [PGAudit documentation](https://github.com/pgaudit/pgaudit/blob/master/README.md#format)
 for more details about each field in a record.

-## Other logs
-
-All logs that are produced by the operator and its instances are in JSON
-format, with `logger` set according to the process that produced them.
-Therefore, all the possible `logger` values are the following:
-
-- `barman-cloud-wal-archive`: from `barman-cloud-wal-archive` directly
-- `barman-cloud-wal-restore`: from `barman-cloud-wal-restore` directly
-- `initdb`: from running `initdb`
-- `pg_basebackup`: from running `pg_basebackup`
-- `pg_controldata`: from running `pg_controldata`
-- `pg_ctl`: from running any `pg_ctl` subcommand
-- `pg_rewind`: from running `pg_rewind`
-- `pgaudit`: from PGAudit extension
-- `postgres`: from the `postgres` instance (having `msg` different than `record`)
-- `wal-archive`: from the `wal-archive` subcommand of the instance manager
-- `wal-restore`: from the `wal-restore` subcommand of the instance manager
-
-Except for `postgres`, which has the aforementioned structures,
-all other possible values have `msg` set to the escaped message that's
+## Other Logs
+
+All logs generated by the operator and its instances are in JSON format, with
+the `logger` field indicating the process that produced them. The possible
+`logger` values are as follows:
+
+- `barman-cloud-wal-archive`: logs from `barman-cloud-wal-archive`
+- `barman-cloud-wal-restore`: logs from `barman-cloud-wal-restore`
+- `initdb`: logs from running `initdb`
+- `pg_basebackup`: logs from running `pg_basebackup`
+- `pg_controldata`: logs from running `pg_controldata`
+- `pg_ctl`: logs from running any `pg_ctl` subcommand
+- `pg_rewind`: logs from running `pg_rewind`
+- `pgaudit`: logs from the PGAudit extension
+- `postgres`: logs from the `postgres` instance (with `msg` distinct from
+  `record`)
+- `wal-archive`: logs from the `wal-archive` subcommand of the instance manager
+- `wal-restore`: logs from the `wal-restore` subcommand of the instance manager
+- `instance-manager`: from the [PostgreSQL instance manager](./instance_manager.md)
+
+With the exception of `postgres`, which follows a specific structure, all other
+`logger` values contain the `msg` field with the escaped message that is
 logged.
diff --git a/docs/src/logical_replication.md b/docs/src/logical_replication.md
new file mode 100644
index 0000000000..02dd5fbea8
--- /dev/null
+++ b/docs/src/logical_replication.md
@@ -0,0 +1,460 @@
+# Logical Replication
+
+PostgreSQL extends its replication capabilities beyond physical replication,
+which operates at the level of exact block addresses and byte-by-byte copying,
+by offering [logical replication](https://www.postgresql.org/docs/current/logical-replication.html).
+Logical replication replicates data objects and their changes based on a
+defined replication identity, typically the primary key.
+
+Logical replication uses a publish-and-subscribe model, where subscribers
+connect to publications on a publisher node. Subscribers pull data changes from
+these publications and can re-publish them, enabling cascading replication and
+complex topologies.
+
+!!! Important
+    To protect your logical replication subscribers after a failover of the
+    publisher cluster in CloudNativePG, ensure that replication slot
+    synchronization for logical decoding is enabled. Without this, your logical
+    replication clients may lose data and fail to continue seamlessly after a
+    failover. For configuration details, see
+    ["Replication: Logical Decoding Slot Synchronization"](replication.md#logical-decoding-slot-synchronization).
+
+This flexible model is particularly useful for:
+
+- Online data migrations
+- Live PostgreSQL version upgrades
+- Data distribution across systems
+- Real-time analytics
+- Integration with external applications
+
+!!! Info
+    For more details, examples, and limitations, please refer to the
+    [official PostgreSQL documentation on Logical Replication](https://www.postgresql.org/docs/current/logical-replication.html).
+
+**CloudNativePG** enhances this capability by providing declarative support for
+key PostgreSQL logical replication objects:
+
+- **Publications** via the `Publication` resource
+- **Subscriptions** via the `Subscription` resource
+
+## Publications
+
+In PostgreSQL's publish-and-subscribe replication model, a
+[**publication**](https://www.postgresql.org/docs/current/logical-replication-publication.html)
+is the source of data changes. It acts as a logical container for the change
+sets (also known as *replication sets*) generated from one or more tables within
+a database. Publications can be defined on any PostgreSQL 10+ instance acting
+as the *publisher*, including instances managed by popular DBaaS solutions in the
+public cloud. Each publication is tied to a single database and provides
+fine-grained control over which tables and changes are replicated.
+
+For publishers outside Kubernetes, you can [create publications using SQL](https://www.postgresql.org/docs/current/sql-createpublication.html)
+or leverage the [`cnpg publication create` plugin command](kubectl-plugin.md#logical-replication-publications).
+
+When managing `Cluster` objects with **CloudNativePG**, PostgreSQL publications
+can be defined declaratively through the `Publication` resource.
+
+!!! Info
+    Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Publication)
+    for the full list of attributes you can define for each `Publication` object.
+
+Suppose you have a cluster named `freddie` and want to replicate all tables in
+the `app` database. Here's a `Publication` manifest:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+  name: freddie-publisher
+spec:
+  cluster:
+    name: freddie
+  dbname: app
+  name: publisher
+  target:
+    allTables: true
+```
+
+In the above example:
+
+- The publication object is named `freddie-publisher` (`metadata.name`).
+- The publication is created via the primary of the `freddie` cluster
+  (`spec.cluster.name`) with name `publisher` (`spec.name`).
+- It includes all tables (`spec.target.allTables: true`) from the `app`
+  database (`spec.dbname`).
+
+!!! Important
+    While `allTables` simplifies configuration, PostgreSQL offers fine-grained
+    control for replicating specific tables or targeted data changes. For advanced
+    configurations, consult the [PostgreSQL documentation](https://www.postgresql.org/docs/current/logical-replication.html).
+    Additionally, refer to the [CloudNativePG API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-PublicationTarget)
+    for details on declaratively customizing replication targets.
+
+### Required Fields in the `Publication` Manifest
+
+The following fields are required for a `Publication` object:
+
+- `metadata.name`: Unique name for the Kubernetes `Publication` object.
+- `spec.cluster.name`: Name of the PostgreSQL cluster.
+- `spec.dbname`: Database name where the publication is created.
+- `spec.name`: Publication name in PostgreSQL.
+- `spec.target`: Specifies the tables or changes to include in the publication.
+
+The `Publication` object must reference a specific `Cluster`, determining where
+the publication will be created. It is managed by the cluster's primary instance,
+ensuring the publication is created or updated as needed.
+
+### Reconciliation and Status
+
+After creating a `Publication`, CloudNativePG manages it on the primary
+instance of the specified cluster. Following a successful reconciliation cycle,
+the `Publication` status will reflect the following:
+
+- `applied: true`, indicates the configuration has been successfully applied.
+- `observedGeneration` matches `metadata.generation`, confirming the applied
+  configuration corresponds to the most recent changes.
+
+If an error occurs during reconciliation, `status.applied` will be `false`, and
+an error message will be included in the `status.message` field.
+
+### Removing a publication
+
+The `publicationReclaimPolicy` field controls the behavior when deleting a
+`Publication` object:
+
+- `retain` (default): Leaves the publication in PostgreSQL for manual
+  management.
+- `delete`: Automatically removes the publication from PostgreSQL.
+
+Consider the following example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+  name: freddie-publisher
+spec:
+  cluster:
+    name: freddie
+  dbname: app
+  name: publisher
+  target:
+    allTables: true
+  publicationReclaimPolicy: delete
+```
+
+In this case, deleting the `Publication` object also removes the `publisher`
+publication from the `app` database of the `freddie` cluster.
+
+## Subscriptions
+
+In PostgreSQL's publish-and-subscribe replication model, a
+[**subscription**](https://www.postgresql.org/docs/current/logical-replication-subscription.html)
+represents the downstream component that consumes data changes.
+A subscription establishes the connection to a publisher's database and
+specifies the set of publications (one or more) it subscribes to. Subscriptions
+can be created on any supported PostgreSQL instance acting as the *subscriber*.
+
+!!! Important
+    Since schema definitions are not replicated, the subscriber must have the
+    corresponding tables already defined before data replication begins.
+
+CloudNativePG simplifies subscription management by enabling you to define them
+declaratively using the `Subscription` resource.
+
+!!! Info
+    Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Subscription)
+    for the full list of attributes you can define for each `Subscription` object.
+
+Suppose you want to replicate changes from the `publisher` publication on the
+`app` database of the `freddie` cluster (*publisher*) to the `app` database of
+the `king` cluster (*subscriber*). Here's an example of a `Subscription`
+manifest:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+  name: freddie-to-king-subscription
+spec:
+  cluster:
+    name: king
+  dbname: app
+  name: subscriber
+  externalClusterName: freddie
+  publicationName: publisher
+```
+
+In the above example:
+
+- The subscription object is named `freddie-to-king-subscription` (`metadata.name`).
+- The subscription is created in the `app` database (`spec.dbname`) of the
+  `king` cluster (`spec.cluster.name`), with name `subscriber` (`spec.name`).
+- It connects to the `publisher` publication in the external `freddie` cluster,
+  referenced by `spec.externalClusterName`.
+
+To facilitate this setup, the `freddie` external cluster must be defined in the
+`king` cluster's configuration. Below is an example excerpt showing how to
+define the external cluster in the `king` manifest:
+
+```yaml
+externalClusters:
+  - name: freddie
+    connectionParameters:
+      host: freddie-rw.default.svc
+      user: postgres
+      dbname: app
+```
+
+!!! Info
+    For more details on configuring the `externalClusters` section, see the
+    ["Bootstrap" section](bootstrap.md#the-externalclusters-section) of the
+    documentation.
+
+As you can see, a subscription can connect to any PostgreSQL database
+accessible over the network. This flexibility allows you to seamlessly migrate
+your data into Kubernetes with nearly zero downtime. It’s an excellent option
+for transitioning from various environments, including popular cloud-based
+Database-as-a-Service (DBaaS) platforms.
+
+### Required Fields in the `Subscription` Manifest
+
+The following fields are mandatory for defining a `Subscription` object:
+
+- `metadata.name`: A unique name for the Kubernetes `Subscription` object
+  within its namespace.
+- `spec.cluster.name`: The name of the PostgreSQL cluster where the
+  subscription will be created.
+- `spec.dbname`: The name of the database in which the subscription will be
+  created.
+- `spec.name`: The name of the subscription as it will appear in PostgreSQL.
+- `spec.externalClusterName`: The name of the external cluster, as defined in
+  the `spec.cluster.name` cluster's configuration. This references the
+  publisher database.
+- `spec.publicationName`: The name of the publication in the publisher database
+  to which the subscription will connect.
+
+The `Subscription` object must reference a specific `Cluster`, determining
+where the subscription will be managed. CloudNativePG ensures that the
+subscription is created or updated on the primary instance of the specified
+cluster.
+
+### Reconciliation and Status
+
+After creating a `Subscription`, CloudNativePG manages it on the primary
+instance of the specified cluster. Following a successful reconciliation cycle,
+the `Subscription` status will reflect the following:
+
+- `applied: true`, indicates the configuration has been successfully applied.
+- `observedGeneration` matches `metadata.generation`, confirming the applied
+  configuration corresponds to the most recent changes.
+
+If an error occurs during reconciliation, `status.applied` will be `false`, and
+an error message will be included in the `status.message` field.
+
+### Removing a Subscription
+
+The `subscriptionReclaimPolicy` field controls the behavior when deleting a
+`Subscription` object:
+
+- `retain` (default): Leaves the subscription in PostgreSQL for manual
+  management.
+- `delete`: Automatically removes the subscription from PostgreSQL.
+
+Consider the following example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+  name: freddie-to-king-subscription
+spec:
+  cluster:
+    name: king
+  dbname: app
+  name: subscriber
+  externalClusterName: freddie
+  publicationName: publisher
+  subscriptionReclaimPolicy: delete
+```
+
+In this case, deleting the `Subscription` object also removes the `subscriber`
+subscription from the `app` database of the `king` cluster.
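+
+As a quick sketch of how you might verify the outcome from the subscriber side
+(using the plugin's `psql` command described earlier), expect the query below
+to return no rows once the subscription is gone:
+
+```sh
+# list remaining logical replication subscriptions in the king cluster
+kubectl cnpg psql king -- -d app -Atc 'SELECT subname FROM pg_subscription'
+```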
+
+### Resilience to Failovers
+
+To ensure that your logical replication subscriptions remain operational after
+a failover of the publisher, configure CloudNativePG to synchronize logical
+decoding slots across the cluster. For detailed instructions, see
+[Logical Decoding Slot Synchronization](replication.md#logical-decoding-slot-synchronization).
+
+## Limitations
+
+Logical replication in PostgreSQL has some inherent limitations, as outlined in
+the [official documentation](https://www.postgresql.org/docs/current/logical-replication-restrictions.html).
+Notably, the following objects are not replicated:
+
+- **Database schema and DDL commands**
+- **Sequence data**
+- **Large objects**
+
+### Addressing Schema Replication
+
+The first limitation, related to schema replication, can be easily addressed
+using CloudNativePG's capabilities. For instance, you can leverage the `import`
+bootstrap feature to copy the schema of the tables you need to replicate.
+Alternatively, you can manually create the schema as you would for any
+PostgreSQL database.
+
+### Handling Sequences
+
+While sequences are not automatically kept in sync through logical replication,
+CloudNativePG provides a solution to be used in live migrations.
+You can use the [`cnpg` plugin](kubectl-plugin.md#synchronizing-sequences)
+to synchronize sequence values, ensuring consistency between the publisher and
+subscriber databases.
+
+## Example of live migration and major Postgres upgrade with logical replication
+
+To highlight the powerful capabilities of logical replication, this example
+demonstrates how to replicate data from a publisher database (`freddie`)
+running PostgreSQL 16 to a subscriber database (`king`) running the latest
+PostgreSQL version. This setup can be deployed in your Kubernetes cluster for
+evaluation and hands-on learning.
+
+This example illustrates how logical replication facilitates live migrations
+and upgrades between PostgreSQL versions while ensuring data consistency. By
+combining logical replication with CloudNativePG, you can easily set up,
+manage, and evaluate such scenarios in a Kubernetes environment.
+
+### Step 1: Setting Up the Publisher (`freddie`)
+
+The first step involves creating a `freddie` PostgreSQL cluster with version 16.
+The cluster contains a single instance and includes an `app` database
+initialized with a table, `n`, storing 10,000 numbers. A logical replication
+publication named `publisher` is also configured to include all tables in the
+database.
+
+Here’s the manifest for setting up the `freddie` cluster and its publication
+resource:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: freddie
+spec:
+  instances: 1
+
+  imageName: ghcr.io/cloudnative-pg/postgresql:16
+
+  storage:
+    size: 1Gi
+
+  bootstrap:
+    initdb:
+      postInitApplicationSQL:
+        - CREATE TABLE n (i SERIAL PRIMARY KEY, m INTEGER)
+        - INSERT INTO n (m) (SELECT generate_series(1, 10000))
+        - ALTER TABLE n OWNER TO app
+
+  managed:
+    roles:
+      - name: app
+        login: true
+        replication: true
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+  name: freddie-publisher
+spec:
+  cluster:
+    name: freddie
+  dbname: app
+  name: publisher
+  target:
+    allTables: true
+```
+
+### Step 2: Setting Up the Subscriber (`king`)
+
+Next, create the `king` PostgreSQL cluster, running the latest version of
+PostgreSQL. This cluster initializes by importing the schema from the `app`
+database on the `freddie` cluster using the external cluster configuration.
+A `Subscription` resource, `freddie-to-king-subscription`, is then configured to
+consume changes published by the `publisher` on `freddie`.
+
+Below is the manifest for setting up the `king` cluster and its subscription:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: king
+spec:
+  instances: 1
+
+  storage:
+    size: 1Gi
+
+  bootstrap:
+    initdb:
+      import:
+        type: microservice
+        schemaOnly: true
+        databases:
+          - app
+        source:
+          externalCluster: freddie
+
+  externalClusters:
+    - name: freddie
+      connectionParameters:
+        host: freddie-rw.default.svc
+        user: app
+        dbname: app
+      password:
+        name: freddie-app
+        key: password
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+  name: freddie-to-king-subscription
+spec:
+  cluster:
+    name: king
+  dbname: app
+  name: subscriber
+  externalClusterName: freddie
+  publicationName: publisher
+```
+
+Once the `king` cluster is running, you can verify that the replication is
+working by connecting to the `app` database and counting the records in the `n`
+table. The following example uses the `psql` command provided by the `cnpg`
+plugin for simplicity:
+
+```console
+kubectl cnpg psql king -- app -qAt -c 'SELECT count(*) FROM n'
+10000
+```
+
+This command should return `10000`, confirming that the data from the `freddie`
+cluster has been successfully replicated to the `king` cluster.
+
+Using the `cnpg` plugin, you can also synchronize existing sequences to ensure
+consistency between the publisher and subscriber. The example below
+demonstrates how to synchronize a sequence for the `king` cluster:
+
+```console
+kubectl cnpg subscription sync-sequences king --subscription=subscriber
+SELECT setval('"public"."n_i_seq"', 10000);
+
+10000
+```
+
+This command updates the sequence `n_i_seq` in the `king` cluster to match the
+current value, ensuring it is in sync with the source database.
diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md
index 20581eeb54..a63120d34e 100644
--- a/docs/src/monitoring.md
+++ b/docs/src/monitoring.md
@@ -1,9 +1,10 @@
 # Monitoring
+
 !!! Important
     Installing Prometheus and Grafana is beyond the scope of this project.
     We assume they are correctly installed in your system. However, for
-    experimentation we provide instructions in 
+    experimentation we provide instructions in
     [Part 4 of the Quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana).

 ## Monitoring Instances

@@ -54,10 +55,10 @@ by specifying a list of one or more databases in the `target_databases` option.
     with Prometheus and Grafana, you can find a quick setup guide
     in [Part 4 of the quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana)

-### Prometheus Operator example
+### Monitoring with the Prometheus operator

 A specific PostgreSQL cluster can be monitored using the
-[Prometheus Operator's](https://github.com/prometheus-operator/prometheus-operator) resource 
+[Prometheus Operator's](https://github.com/prometheus-operator/prometheus-operator) resource
 [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.75.1/Documentation/api.md#podmonitor).
 A `PodMonitor` that correctly points to the Cluster can be automatically created by the operator by setting
@@ -217,17 +218,17 @@ cnpg_collector_up{cluster="cluster-example"} 1

 # HELP cnpg_collector_postgres_version Postgres version
 # TYPE cnpg_collector_postgres_version gauge
-cnpg_collector_postgres_version{cluster="cluster-example",full="16.4"} 16.4
+cnpg_collector_postgres_version{cluster="cluster-example",full="17.5"} 17.5

-# HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp
+# HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp (Deprecated)
 # TYPE cnpg_collector_last_failed_backup_timestamp gauge
 cnpg_collector_last_failed_backup_timestamp 0

-# HELP cnpg_collector_last_available_backup_timestamp The last available backup as a unix timestamp
+# HELP cnpg_collector_last_available_backup_timestamp The last available backup as a unix timestamp (Deprecated)
 # TYPE cnpg_collector_last_available_backup_timestamp gauge
 cnpg_collector_last_available_backup_timestamp 1.63238406e+09

-# HELP cnpg_collector_first_recoverability_point The first point of recoverability for the cluster as a unix timestamp
+# HELP cnpg_collector_first_recoverability_point The first point of recoverability for the cluster as a unix timestamp (Deprecated)
 # TYPE cnpg_collector_first_recoverability_point gauge
 cnpg_collector_first_recoverability_point 1.63238406e+09

@@ -397,9 +398,16 @@ go_threads 18
 `Major.Minor.Patch` can be found inside one of its label field named `full`.

-!!! Note
-    `cnpg_collector_first_recoverability_point` and `cnpg_collector_last_available_backup_timestamp`
-    will be zero until your first backup to the object store. This is separate from the WAL archival.
+!!! Warning
+    The metrics `cnpg_collector_last_failed_backup_timestamp`,
+    `cnpg_collector_last_available_backup_timestamp`, and
+    `cnpg_collector_first_recoverability_point` have been deprecated starting
+    from version 1.26. These metrics will continue to function with native backup
+    solutions such as in-core Barman Cloud (deprecated) and volume snapshots. Note
+    that for these cases, `cnpg_collector_first_recoverability_point` and
+    `cnpg_collector_last_available_backup_timestamp` will remain zero until the
+    first backup is completed to the object store. This is separate from WAL
+    archiving.

 ### User defined metrics

@@ -636,20 +644,19 @@ The possible values for `usage` are:
 | `DURATION`    | use this column as a text duration (in milliseconds) |
 | `HISTOGRAM`   | use this column as a histogram |

-
 Please visit the ["Metric Types" page](https://prometheus.io/docs/concepts/metric_types/)
 from the Prometheus documentation for more information.

@@ -652,7 +659,6 @@ cnpg__{= ... }
 8080:8080
+```
+
+With port forwarding active, the metrics are easily viewable on a browser at
+[`localhost:8080/metrics`](http://localhost:8080/metrics).
+
+### Using curl
+
+Create the `curl` pod with the following command:

 ```yaml
+kubectl apply -f - <:9187/metrics
@@ -792,14 +829,15 @@ kubectl exec -ti curl -- curl -s ${POD_IP}:9187/metrics
 ```

 If you enabled TLS metrics, run instead:
+
 ```shell
 kubectl exec -ti curl -- curl -sk https://${POD_IP}:9187/metrics
 ```

-In case you want to access the metrics of the operator, you need to point
+To access the metrics of the operator, you need to point
 to the pod where the operator is running, and use TCP port 8080 as target.
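+
+A minimal sketch (assuming the default `cnpg-system` namespace and
+`cnpg-controller-manager` deployment name):
+
+```sh
+# forward the operator metrics port locally, then scrape it
+kubectl -n cnpg-system port-forward deployment/cnpg-controller-manager 8080:8080 &
+curl -s http://localhost:8080/metrics | head
+```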
-At the end of the inspection, please make sure you delete the `curl` pod:
+When you're done inspecting metrics, please remember to delete the `curl` pod:

 ```shell
 kubectl delete -f curl.yaml
@@ -826,12 +864,15 @@ section for context:
 In addition, we provide the "raw" sources for the Prometheus alert rules in the
 `alerts.yaml` file.

-The [Grafana dashboard](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json) has a dedicated repository now.
-
-Note that, for the configuration of `kube-prometheus-stack`, other fields and
-settings are available over what we provide in `kube-stack-config.yaml`.
+A Grafana dashboard for CloudNativePG clusters and the operator is kept in the
+dedicated repository [`cloudnative-pg/grafana-dashboards`](https://github.com/cloudnative-pg/grafana-dashboards/tree/main)
+as a dashboard JSON configuration:
+[`grafana-dashboard.json`](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json).
+The file can be downloaded and imported into Grafana
+(menus: Dashboard > New > Import).

-You can execute `helm show values prometheus-community/kube-prometheus-stack`
-to view them. For further information, please refer to the
+For a general reference on the settings available on `kube-prometheus-stack`,
+you can execute `helm show values prometheus-community/kube-prometheus-stack`.
+Please refer to the
 [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
-page.
+page for more detail.
diff --git a/docs/src/networking.md b/docs/src/networking.md
index d64624d3c3..3840550433 100644
--- a/docs/src/networking.md
+++ b/docs/src/networking.md
@@ -1,4 +1,5 @@
 # Networking
+
 CloudNativePG assumes the underlying Kubernetes cluster has the required
 connectivity already set up.
diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md
index 4c6aed31f7..a73eba0dd9 100644
--- a/docs/src/operator_capability_levels.md
+++ b/docs/src/operator_capability_levels.md
@@ -1,4 +1,5 @@
 # Operator capability levels
+
 These capabilities were implemented by CloudNativePG,
 classified using the
@@ -75,7 +76,7 @@ of the CloudNativePG deployment in your Kubernetes infrastructure.

 ### Self-contained instance manager

 Instead of relying on an external tool to
-coordinate PostgreSQL instances in the Kubernetes cluster pods, 
+coordinate PostgreSQL instances in the Kubernetes cluster pods,
 such as Patroni or Stolon,
 the operator injects the operator executable inside each pod, in a file
 named `/controller/manager`. The application is used to control the underlying
@@ -114,11 +115,11 @@ than `1`, the operator manages `instances -1` replicas, including high
 availability (HA) through automated failover and rolling updates through
 switchover operations.

-CloudNativePG manages replication slots for all the replicas
-in the HA cluster. The implementation is inspired by the previously
-proposed patch for PostgreSQL, called
-[failover slots](https://wiki.postgresql.org/wiki/Failover_slots), and
-also supports user defined physical replication slots on the primary.
+CloudNativePG manages replication slots for all replicas in the
+high-availability cluster. It also supports user-defined physical replication
+slots on the primary and enables logical decoding failover—natively for
+PostgreSQL 17 and later using `sync_replication_slots`, and through the
+`pg_failover_slots` extension for earlier versions.
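+
+As a hedged sketch only (PostgreSQL 17 or later; refer to the replication
+documentation for the configuration actually supported by the operator), the
+native mechanism revolves around the `sync_replication_slots` parameter in the
+cluster specification:
+
+```yaml
+  postgresql:
+    parameters:
+      sync_replication_slots: "on"
+```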
 ### Service Configuration

@@ -136,14 +137,19 @@ PostgreSQL outside Kubernetes. This is particularly useful for DBaaS purposes.

 ### Database configuration

-The operator is designed to manage a PostgreSQL cluster with a single
-database. The operator transparently manages access to the database through
-three Kubernetes services provisioned and managed for read-write,
+The operator is designed to bootstrap a PostgreSQL cluster with a single
+database. The operator transparently manages network access to the cluster
+through three Kubernetes services provisioned and managed for read-write,
 read, and read-only workloads.
 Using the convention-over-configuration approach, the operator creates a
 database called `app`, by default owned by a regular Postgres user with the
 same name. You can specify both the database name and the user name, if
-required.
+required, as part of the bootstrap.
+
+Additional databases can be created or managed via
+[declarative database management](declarative_database_management.md) using
+the `Database` CRD, also supporting extensions, schemas and foreign data
+wrappers (FDW).

 Although no configuration is required to run the cluster, you can customize
 both PostgreSQL runtime configuration and PostgreSQL host-based
@@ -155,7 +161,7 @@ CloudNativePG supports
 [management of PostgreSQL roles, users, and groups through declarative
 configuration](declarative_role_management.md) using the `.spec.managed.roles`
 stanza.

-### Pod security policies
+### Pod security standards

 For InfoSec requirements, the operator doesn't require privileged mode for
 any container. It enforces a read-only root filesystem to guarantee containers
@@ -223,7 +229,7 @@ includes integration with cert-manager.

 ### Certificate authentication for streaming replication

-To authorize streaming replication connections from the standby servers, 
+To authorize streaming replication connections from the standby servers,
 the operator relies on TLS client certificate authentication. This method is used
 instead of relying on a password (and therefore a secret).

@@ -285,25 +291,26 @@ workload, in this case PostgreSQL servers. This includes PostgreSQL minor
 release updates (security and bug fixes normally) as well as major online
 upgrades.

-### Upgrade of the operator
+### Operator Upgrade
+
+Upgrading the operator is seamless and can be done as a new deployment. After
+upgrading the controller, a rolling update of all deployed PostgreSQL clusters
+is initiated. You can choose to update all clusters simultaneously or
+distribute their upgrades over time.

-You can upgrade the operator seamlessly as a new deployment. Because of the instance
-manager's injection, a change in the
-operator doesn't require a change in the operand.
-The operator can manage older versions of the operand.
+Thanks to the instance manager's injection, upgrading the operator does not
+require changes to the operand, allowing the operator to manage older versions
+of it.

-CloudNativePG also supports [in-place updates of the instance manager](installation_upgrade.md#in-place-updates-of-the-instance-manager)
-following an upgrade of the operator. In-place updates don't require a rolling
-update (and subsequent switchover) of the cluster.
+Additionally, CloudNativePG supports [in-place updates of the instance manager](installation_upgrade.md#in-place-updates-of-the-instance-manager)
+following an operator upgrade. In-place updates do not require a rolling update
+or a subsequent switchover of the cluster.
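+
+A minimal sketch of opting into in-place updates (assuming the default
+operator namespace and configuration ConfigMap names used by the standard
+deployment):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cnpg-controller-manager-config
+  namespace: cnpg-system
+data:
+  ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: "true"
+```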
### Upgrade of the managed workload The operand can be upgraded using a declarative configuration approach as -part of changing the CR and, in particular, the `imageName` parameter. The -operator prevents major upgrades of PostgreSQL while making it possible to go -in both directions in terms of minor PostgreSQL releases within a major -version, enabling updates and rollbacks. - +part of changing the CR and, in particular, the `imageName` parameter. +This is normally initiated by security updates or Postgres minor version updates. In the presence of standby servers, the operator performs rolling updates starting from the replicas. It does this by dropping the existing pod and creating a new one with the new requested operand image that reuses the underlying storage. @@ -315,11 +322,24 @@ The setting to use depends on the business requirements, as the operation might generate some downtime for the applications. This downtime can range from a few seconds to minutes, based on the actual database workload. +### Offline In-Place Major Upgrades of PostgreSQL + +CloudNativePG supports declarative offline in-place major upgrades when a new +operand container image with a higher PostgreSQL major version is applied to a +cluster. The upgrade can be triggered by updating the image tag via the +`.spec.imageName` option or by using an image catalog to manage version +changes. During the upgrade, all cluster pods are shut down to ensure data +consistency. A new job is then created to validate the upgrade conditions, +execute `pg_upgrade`, and create new directories for `PGDATA`, WAL files, and +tablespaces if needed. Once the upgrade is complete, replicas are re-created. +Failed upgrades can be rolled back. + ### Display cluster availability status during upgrade At any time, convey the cluster's high availability status, for example, `Setting up primary`, `Creating a new replica`, `Cluster in healthy state`, -`Switchover in progress`, `Failing over`, and `Upgrading cluster`. +`Switchover in progress`, `Failing over`, `Upgrading cluster`, and `Upgrading +Postgres major version`. ## Level 3: Full lifecycle @@ -328,11 +348,12 @@ continuity and scalability. *Disaster recovery* is a business continuity component that requires that both backup and recovery of a database work correctly. While as a -starting point, the goal is to achieve RPO < 5 minutes, the long-term goal is -to implement RPO=0 backup solutions. *High availability* is the other -important component of business continuity. Through PostgreSQL native -physical replication and hot standby replicas, it allows the operator to perform -failover and switchover operations. This area includes enhancements in: +starting point, the goal is to achieve [RPO](before_you_start.md#rpo) < 5 +minutes, the long-term goal is to implement RPO=0 backup solutions. *High +availability* is the other important component of business continuity. Through +PostgreSQL native physical replication and hot standby replicas, it allows the +operator to perform failover and switchover operations. This area includes +enhancements in: - Control of PostgreSQL physical replication, such as synchronous replication, (cascading) replication clusters, and so on @@ -355,55 +376,54 @@ user action. The operator transparently sets the `archive_command` to rely on `barman-cloud-wal-archive` to ship WAL files to the defined endpoint. You can decide the compression algorithm, as well as the number of parallel jobs to concurrently upload WAL files -in the archive. 
In addition, `Instance Manager` checks -the correctness of the archive destination by performing the `barman-cloud-check-wal-archive` +in the archive. In addition, `Instance Manager` checks +the correctness of the archive destination by performing the `barman-cloud-check-wal-archive` command before beginning to ship the first set of WAL files. -### PostgreSQL backups - -The operator was designed to provide application-level backups using -PostgreSQL’s native continuous hot backup technology based on -physical base backups and continuous WAL archiving. -Base backups can be saved on: +### PostgreSQL Backups -- Kubernetes volume snapshots -- Object stores (AWS S3 and S3-compatible, Azure Blob Storage, Google Cloud - Storage, and gateways like MinIO) +CloudNativePG provides a pluggable interface (CNPG-I) for managing +application-level backups using PostgreSQL’s native physical backup +mechanisms—namely base backups and continuous WAL archiving. This +design enables flexibility and extensibility while ensuring consistency and +performance. -Base backups are defined at the cluster level, declaratively, -through the `backup` parameter in the cluster definition. +The CloudNativePG Community officially supports the [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/), +which enables continuous physical backups to object stores, along with full and +Point-In-Time Recovery (PITR) capabilities. -You can define base backups in two ways: +In addition to CNPG-I plugins, CloudNativePG also natively supports backups +using Kubernetes volume snapshots, when supported by the underlying storage +class and CSI driver. -- On-demand, through the `Backup` custom resource definition -- Scheduled, through the `ScheduledBackup`custom resource definition, using a cron-like syntax +You can initiate base backups in two ways: -Volume snapshots rely directly on the Kubernetes API, which delegates this -capability to the underlying storage classes and CSI drivers. Volume snapshot -backups are suitable for very large database (VLDB) contexts. +- On-demand, using the `Backup` custom resource +- Scheduled, using the `ScheduledBackup` custom resource, with a cron-like + schedule format -Object store backups rely on `barman-cloud-backup` for the job (distributed as -part of the application container image) to relay backups in the same endpoint, -alongside WAL files. +Volume snapshots leverage the Kubernetes API and are particularly effective for +very large databases (VLDBs) due to their speed and storage efficiency. -Both `barman-cloud-wal-restore` and `barman-cloud-backup` are distributed in -the application container image under GNU GPL 3 terms. +Both volume snapshots and CNPG-I-based backups support: -Object store backups and volume snapshot backups are taken while PostgreSQL is -up and running (hot backups). Volume snapshots also support taking consistent -database snapshots with cold backups. +- Hot backups: Taken while PostgreSQL is running, ensuring minimal + disruption. +- Cold backups: Performed by temporarily stopping PostgreSQL to ensure a + fully consistent snapshot, when required. ### Backups from a standby The operator supports offloading base backups onto a standby without impacting -the RPO of the database. This allows resources to be preserved on the primary, in -particular I/O, for standard database operations. +the [RPO](before_you_start.md#rpo) of the database. This allows resources to +be preserved on the primary, in particular I/O, for standard database +operations. 
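+
+For example, a nightly backup that prefers a standby as its target could be
+declared as in the following minimal sketch (the resource name, schedule, and
+backup method are illustrative):
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: ScheduledBackup
+metadata:
+  name: cluster-example-nightly
+spec:
+  schedule: "0 0 2 * * *"   # six-field cron format, seconds first
+  cluster:
+    name: cluster-example
+  method: volumeSnapshot    # requires a CSI driver with snapshot support
+  target: prefer-standby    # offload the base backup to a replica when available
+```
+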
### Full restore from a backup

 The operator enables you to bootstrap a new cluster (with its settings)
-starting from an existing and accessible backup, either on a volume snapshot
-or in an object store.
+starting from an existing and accessible backup, whether stored on a volume
+snapshot, in an object store, or managed via a plugin.

 Once the bootstrap process is completed, the operator initiates the instance
 in recovery mode. It replays all available WAL files from the specified archive,
@@ -452,8 +472,8 @@ switchover across data centers remains necessary.)

 Additionally, the flexibility extends to creating delayed replica clusters
 intentionally lagging behind the primary cluster. This intentional lag aims to
-minimize the Recovery Time Objective (RTO) in the event of unintended errors,
-such as incorrect `DELETE` or `UPDATE` SQL operations.
+minimize the Recovery Time Objective ([RTO](before_you_start.md#rto)) in the
+event of unintended errors, such as incorrect `DELETE` or `UPDATE` SQL operations.

 ### Distributed Database Topologies

@@ -486,18 +506,19 @@ scalability of PostgreSQL databases, ensuring a streamlined and optimized
 experience for managing large scale data storage in cloud-native environments.
 Support for temporary tablespaces is also included.

-### Liveness and readiness probes
+### Customizable Startup, Liveness, and Readiness Probes

-The operator defines liveness and readiness probes for the Postgres
-containers that are then invoked by the kubelet. They're mapped respectively
-to the `/healthz` and `/readyz` endpoints of the web server managed
-directly by the instance manager.
+CloudNativePG configures startup, liveness, and readiness probes for PostgreSQL
+containers, which are invoked by the Kubernetes kubelet. These probes interact
+with the `/startupz`, `/healthz`, and `/readyz` endpoints exposed by
+the instance manager's web server to monitor the Pod's health and readiness.

-The liveness probe is based on the `pg_isready` executable, and the pod is
-considered healthy with exit codes 0 (server accepting connections normally)
-and 1 (server is rejecting connections, for example, during startup). The
-readiness probe issues a simple query (`;`) to verify that the server is
-ready to accept connections.
+All probes are configured with default settings but can be fully customized to
+meet specific needs, allowing for fine-tuning to align with your environment
+and workloads.
+
+For detailed configuration options and advanced usage,
+refer to the [Postgres instance manager](instance_manager.md) documentation.

 ### Rolling deployments

@@ -542,22 +563,13 @@ that, until the fence is lifted, data on the pod isn't modified by PostgreSQL
 and that you can investigate file system for debugging and troubleshooting
 purposes.

-### Hibernation (declarative)
+### Hibernation

 CloudNativePG supports [hibernation of a running PostgreSQL cluster](declarative_hibernation.md)
 in a declarative manner, through the `cnpg.io/hibernation` annotation.
 Hibernation enables saving CPU power by removing the database pods while
 keeping the database PVCs. This feature simulates scaling to 0 instances.

-### Hibernation (imperative)
-
-CloudNativePG supports [hibernation of a running PostgreSQL cluster](kubectl-plugin.md#cluster-hibernation)
-by way of the `cnpg` plugin. Hibernation shuts down all Postgres instances in the
-high-availability cluster and keeps a static copy of the PVC group of the
-primary. The copy contains `PGDATA` and WALs.
The plugin enables you to exit the -hibernation phase by resuming the primary and then recreating all the -replicas, if they exist. - ### Reuse of persistent volumes storage in pods When the operator needs to create a pod that was deleted by the user or @@ -582,6 +594,15 @@ and makes the use of the underlying PostgreSQL resources more efficient. Instead of connecting directly to a PostgreSQL service, applications can now connect to the PgBouncer service and start reusing any existing connection. +### Logical Replication + +CloudNativePG supports PostgreSQL's logical replication in a declarative manner +using `Publication` and `Subscription` custom resource definitions. + +Logical replication is particularly useful together with the import facility +for online data migrations (even from public DBaaS solutions) and major +PostgreSQL upgrades. + ## Level 4: Deep insights Capability level 4 is about *observability*: monitoring, diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index 26bf16678a..6be43321ab 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -1,4 +1,5 @@ # Operator configuration + The operator for CloudNativePG is installed from a standard deployment manifest and follows the convention over configuration paradigm. @@ -35,17 +36,23 @@ The operator looks for the following environment variables to be defined in the Name | Description ---- | ----------- -`INHERITED_ANNOTATIONS` | list of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods -`INHERITED_LABELS` | list of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods -`PULL_SECRET_NAME` | name of an additional pull secret to be defined in the operator's namespace and to be used to download images -`ENABLE_AZURE_PVC_UPDATES` | Enables to delete Postgres pod if its PVC is stuck in Resizing condition. This feature is mainly for the Azure environment (default `false`) -`ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES` | when set to `true`, enables in-place updates of the instance manager after an update of the operator, avoiding rolling updates of the cluster (default `false`) -`MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters -`MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters `CERTIFICATE_DURATION` | Determines the lifetime of the generated certificates in days. Default is 90. +`CLUSTERS_ROLLOUT_DELAY` | The duration (in seconds) to wait between the roll-outs of different clusters during an operator upgrade. This setting controls the timing of upgrades across clusters, spreading them out to reduce system impact. The default value is `0` which means no delay between PostgreSQL cluster upgrades. +`CREATE_ANY_SERVICE` | When set to `true`, will create `-any` service for the cluster. Default is `false` +`ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES` | When set to `true`, enables in-place updates of the instance manager after an update of the operator, avoiding rolling updates of the cluster (default `false`) `EXPIRING_CHECK_THRESHOLD` | Determines the threshold, in days, for identifying a certificate as expiring. Default is 7. 
-`CREATE_ANY_SERVICE` | when set to `true`, will create `-any` service for the cluster. Default is `false` `INCLUDE_PLUGINS` | A comma-separated list of plugins to be always included in the Cluster's reconciliation. +`INHERITED_ANNOTATIONS` | List of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods +`INHERITED_LABELS` | List of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods +`INSTANCES_ROLLOUT_DELAY` | The duration (in seconds) to wait between roll-outs of individual PostgreSQL instances within the same cluster during an operator upgrade. The default value is `0`, meaning no delay between upgrades of instances in the same PostgreSQL cluster. +`KUBERNETES_CLUSTER_DOMAIN` | Defines the domain suffix for service FQDNs within the Kubernetes cluster. If left unset, it defaults to "cluster.local". +`MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters +`MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters +`OPERATOR_IMAGE_NAME` | The name of the operator image used to bootstrap Pods. Defaults to the image specified during installation. +`POSTGRES_IMAGE_NAME` | The name of the PostgreSQL image used by default for new clusters. Defaults to the version specified in the operator. +`PULL_SECRET_NAME` | Name of an additional pull secret to be defined in the operator's namespace and to be used to download images +`STANDBY_TCP_USER_TIMEOUT` | Defines the [`TCP_USER_TIMEOUT` socket option](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-TCP-USER-TIMEOUT) for replication connections from standby instances to the primary. Default is 0 (system's default). +`DRAIN_TAINTS` | Specifies the taint keys that should be interpreted as indicators of node drain. By default, it includes the taints commonly applied by [kubectl](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/), [Cluster Autoscaler](https://github.com/kubernetes/autoscaler), and [Karpenter](https://github.com/aws/karpenter-provider-aws): `node.kubernetes.io/unschedulable`, `ToBeDeletedByClusterAutoscaler`, `karpenter.sh/disrupted`, `karpenter.sh/disruption`. Values in `INHERITED_ANNOTATIONS` and `INHERITED_LABELS` support path-like wildcards. For example, the value `example.com/*` will match both the value `example.com/one` and `example.com/two`. @@ -62,9 +69,10 @@ will ignore the configuration parameter. The example below customizes the behavior of the operator, by defining the label/annotation names to be inherited by the resources created by -any `Cluster` object that is deployed at a later time, and by enabling +any `Cluster` object that is deployed at a later time, by enabling [in-place updates for the instance -manager](installation_upgrade.md#in-place-updates-of-the-instance-manager). +manager](installation_upgrade.md#in-place-updates-of-the-instance-manager), +and by spreading upgrades. 
 ```yaml
 apiVersion: v1
@@ -73,9 +81,11 @@ metadata:
   name: cnpg-controller-manager-config
   namespace: cnpg-system
 data:
+  CLUSTERS_ROLLOUT_DELAY: '60'
+  ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true'
   INHERITED_ANNOTATIONS: categories
   INHERITED_LABELS: environment, workload, app
-  ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true'
+  INSTANCES_ROLLOUT_DELAY: '10'
 ```

 ## Defining an operator secret

@@ -84,7 +94,8 @@ The example below customizes the behavior of the operator, by defining
 the label/annotation names to be inherited by the resources created by
 any `Cluster` object that is deployed at a later time, and by enabling
 [in-place updates for the instance
-manager](installation_upgrade.md#in-place-updates-of-the-instance-manager).
+manager](installation_upgrade.md#in-place-updates-of-the-instance-manager),
+and by spreading upgrades.

 ```yaml
 apiVersion: v1
@@ -94,9 +105,11 @@ metadata:
   namespace: cnpg-system
 type: Opaque
 stringData:
+  CLUSTERS_ROLLOUT_DELAY: '60'
+  ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true'
   INHERITED_ANNOTATIONS: categories
   INHERITED_LABELS: environment, workload, app
-  ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true'
+  INSTANCES_ROLLOUT_DELAY: '10'
 ```

 ## Restarting the operator to reload configs

@@ -145,7 +158,8 @@ You can do this by executing these commands:
 kubectl edit deployment -n cnpg-system cnpg-controller-manager
 ```

-Then on the edit page scroll down the container args and add `--pprof-server=true`, example:
+Then, on the edit page, scroll down to the container args and add
+`--pprof-server=true`, as in this example:

 ```yaml
 containers:
@@ -160,7 +174,8 @@ Then on the edit page scroll down the container args and add `--pprof-server=tru
 - /manager
 ```

-Save the changes, the deployment now will execute a rollout and the new pod will have the PPROF server enabled.
+Save the changes; the deployment will then execute a roll-out, and the new pod
+will have the pprof server enabled.

 Once the pod is running you can exec inside the container by doing:

diff --git a/docs/src/postgis.md b/docs/src/postgis.md
index ada7b8dc4f..087efcdf4c 100644
--- a/docs/src/postgis.md
+++ b/docs/src/postgis.md
@@ -1,4 +1,5 @@
 # PostGIS
+
 [PostGIS](https://postgis.net/) is a very popular open source extension for
 PostgreSQL that introduces support for storing GIS (Geographic Information
@@ -46,7 +47,7 @@ do this in two ways:

 ## Create a new PostgreSQL cluster with PostGIS

-Let's suppose you want to create a new PostgreSQL 14 cluster with PostGIS 3.2.
+Let's suppose you want to create a new PostgreSQL 17 cluster with PostGIS 3.5.

 The first step is to ensure you use the right PostGIS container image for
 the operand, and properly set the `.spec.imageName` option in the `Cluster`
@@ -59,7 +60,7 @@ provides some guidance on how the creation of a PostGIS cluster can be done.
 Please consider that, although convention over configuration applies in
 CloudNativePG, you should spend time configuring and tuning your system for
 production. Also the `imageName` in the example below deliberately points
- to the latest available image for PostgreSQL 14 - you should use a specific
+ to the latest available image for PostgreSQL 17 - you should use a specific
 image name or, preferably, the SHA256 digest for true immutability.
```yaml @@ -68,71 +69,75 @@ kind: Cluster metadata: name: postgis-example spec: - instances: 3 - imageName: ghcr.io/cloudnative-pg/postgis:14 - bootstrap: - initdb: - postInitTemplateSQL: - - CREATE EXTENSION postgis; - - CREATE EXTENSION postgis_topology; - - CREATE EXTENSION fuzzystrmatch; - - CREATE EXTENSION postgis_tiger_geocoder; - + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgis:17 storage: size: 1Gi + postgresql: + parameters: + log_statement: ddl +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: postgis-example-app +spec: + name: app + owner: app + cluster: + name: postgis-example + extensions: + - name: postgis + - name: postgis_topology + - name: fuzzystrmatch + - name: postgis_tiger_geocoder ``` -The example relies on the `postInitTemplateSQL` option which executes a list of -queries against the `template1` database, before the actual creation of the -application database (called `app`). This means that, once you have applied the -manifest and the cluster is up, you will have the above extensions installed in -both the template database and the application database, ready for use. +The example leverages the `Database` resource's declarative extension +management to add the specified extensions to the `app` database. !!! Info - Take some time and look at the available options in `.spec.bootstrap.initdb` - from the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BootstrapInitDB), such as - `postInitApplicationSQL`. + For more details, see the + ["Managing Extensions in a Database" section](declarative_database_management.md#managing-extensions-in-a-database). You can easily verify the available version of PostGIS that is in the container, by connecting to the `app` database (you might obtain different values from the ones in this document): ```console -$ kubectl exec -ti postgis-example-1 -- psql app -Defaulted container "postgres" out of: postgres, bootstrap-controller (init) -psql (16.4 (Debian 16.4-1.pgdg110+1)) +$ kubectl cnpg psql postgis-example -- app +psql (17.5 (Debian 17.5-1.pgdg110+2)) Type "help" for help. 
app=# SELECT * FROM pg_available_extensions WHERE name ~ '^postgis' ORDER BY 1; name | default_version | installed_version | comment --------------------------+-----------------+-------------------+------------------------------------------------------------ - postgis | 3.2.2 | 3.2.2 | PostGIS geometry and geography spatial types and functions - postgis-3 | 3.2.2 | | PostGIS geometry and geography spatial types and functions - postgis_raster | 3.2.2 | | PostGIS raster types and functions - postgis_raster-3 | 3.2.2 | | PostGIS raster types and functions - postgis_sfcgal | 3.2.2 | | PostGIS SFCGAL functions - postgis_sfcgal-3 | 3.2.2 | | PostGIS SFCGAL functions - postgis_tiger_geocoder | 3.2.2 | 3.2.2 | PostGIS tiger geocoder and reverse geocoder - postgis_tiger_geocoder-3 | 3.2.2 | | PostGIS tiger geocoder and reverse geocoder - postgis_topology | 3.2.2 | 3.2.2 | PostGIS topology spatial types and functions - postgis_topology-3 | 3.2.2 | | PostGIS topology spatial types and functions + postgis | 3.5.2 | 3.5.2 | PostGIS geometry and geography spatial types and functions + postgis-3 | 3.5.2 | | PostGIS geometry and geography spatial types and functions + postgis_raster | 3.5.2 | | PostGIS raster types and functions + postgis_raster-3 | 3.5.2 | | PostGIS raster types and functions + postgis_sfcgal | 3.5.2 | | PostGIS SFCGAL functions + postgis_sfcgal-3 | 3.5.2 | | PostGIS SFCGAL functions + postgis_tiger_geocoder | 3.5.2 | 3.5.2 | PostGIS tiger geocoder and reverse geocoder + postgis_tiger_geocoder-3 | 3.5.2 | | PostGIS tiger geocoder and reverse geocoder + postgis_topology | 3.5.2 | 3.5.2 | PostGIS topology spatial types and functions + postgis_topology-3 | 3.5.2 | | PostGIS topology spatial types and functions (10 rows) ``` -The next step is to verify that the extensions listed in the -`postInitTemplateSQL` section have been correctly installed in the `app` -database. +The next step is to verify that the extensions listed in the `Database` +resource have been correctly installed in the `app` database. 
```console app=# \dx List of installed extensions Name | Version | Schema | Description ------------------------+---------+------------+------------------------------------------------------------ - fuzzystrmatch | 1.1 | public | determine similarities and distance between strings + fuzzystrmatch | 1.2 | public | determine similarities and distance between strings plpgsql | 1.0 | pg_catalog | PL/pgSQL procedural language - postgis | 3.2.2 | public | PostGIS geometry and geography spatial types and functions - postgis_tiger_geocoder | 3.2.2 | tiger | PostGIS tiger geocoder and reverse geocoder - postgis_topology | 3.2.2 | topology | PostGIS topology spatial types and functions + postgis | 3.5.2 | public | PostGIS geometry and geography spatial types and functions + postgis_tiger_geocoder | 3.5.2 | tiger | PostGIS tiger geocoder and reverse geocoder + postgis_topology | 3.5.2 | topology | PostGIS topology spatial types and functions (5 rows) ``` @@ -142,6 +147,6 @@ Finally: app=# SELECT postgis_full_version(); postgis_full_version ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - POSTGIS="3.2.2 628da50" [EXTENSION] PGSQL="140" GEOS="3.9.0-CAPI-1.16.2" PROJ="7.2.1" LIBXML="2.9.10" LIBJSON="0.15" LIBPROTOBUF="1.3.3" WAGYU="0.5.0 (Internal)" TOPOLOGY + POSTGIS="3.5.2 dea6d0a" [EXTENSION] PGSQL="170" GEOS="3.9.0-CAPI-1.16.2" PROJ="7.2.1 NETWORK_ENABLED=OFF URL_ENDPOINT=https://cdn.proj.org USER_WRITABLE_DIRECTORY=/tmp/proj DATABASE_PATH=/usr/share/proj/proj.db" (compiled against PROJ 7.2.1) LIBXML="2.9.10" LIBJSON="0.15" LIBPROTOBUF="1.3.3" WAGYU="0.5.0 (Internal)" TOPOLOGY (1 row) ``` diff --git a/docs/src/postgres_upgrades.md b/docs/src/postgres_upgrades.md new file mode 100644 index 0000000000..35a17057cf --- /dev/null +++ b/docs/src/postgres_upgrades.md @@ -0,0 +1,218 @@ +# PostgreSQL Upgrades + + +PostgreSQL upgrades fall into two categories: + +- [Minor version upgrades](#minor-version-upgrades) (e.g., from 17.0 to 17.1) +- [Major version upgrades](#major-version-upgrades) (e.g., from 16.x to 17.0) + +## Minor Version Upgrades + +PostgreSQL version numbers follow a `major.minor` format. For instance, in +version 17.1: + +- `17` is the major version +- `1` is the minor version + +Minor releases are fully compatible with earlier and later minor releases of +the same major version. They include bug fixes and security updates but do not +introduce changes to the internal storage format. +For example, PostgreSQL 17.1 is compatible with 17.0 and 17.5. + +### Upgrading a Minor Version in CloudNativePG + +To upgrade to a newer minor version, simply update the PostgreSQL container +image reference in your cluster definition, either directly or via image catalogs. +CloudNativePG will trigger a [rolling update of the cluster](rolling_update.md), +replacing each instance one by one, starting with the replicas. Once all +replicas have been updated, it will perform either a switchover or a restart of +the primary to complete the process. + +## Major Version Upgrades + +Major PostgreSQL releases introduce changes to the internal data storage +format, requiring a more structured upgrade process. + +CloudNativePG supports three methods for performing major upgrades: + +1. [Logical dump/restore](database_import.md) – Blue/green deployment, offline. +2. 
[Native logical replication](logical_replication.md#example-of-live-migration-and-major-postgres-upgrade-with-logical-replication) – Blue/green deployment, online. +3. Physical with `pg_upgrade` – In-place upgrade, offline (covered in the + ["Offline In-Place Major Upgrades" section](#offline-in-place-major-upgrades) below). + +Each method has trade-offs in terms of downtime, complexity, and data volume +handling. The best approach depends on your upgrade strategy and operational +constraints. + +!!! Important + We strongly recommend testing all methods in a controlled environment + before proceeding with a production upgrade. + +## Offline In-Place Major Upgrades + +CloudNativePG performs an **offline in-place major upgrade** when a new operand +container image with a higher PostgreSQL major version is declaratively +requested for a cluster. + +!!! Important + Major upgrades are only supported between images based on the same + operating system distribution. For example, if your previous version uses a + `bullseye` image, you cannot upgrade to a `bookworm` image. + +!!! Warning + There is a bug in PostgreSQL 17.0 through 17.5 that prevents successful upgrades + if the `max_slot_wal_keep_size` parameter is set to any value other than `-1`. + The upgrade process will fail with an error related to replication slot configuration. + This issue has been [fixed in PostgreSQL 17.6 and 18beta2 or later versions](https://github.com/postgres/postgres/commit/f36e5774). + If you are using PostgreSQL 17.0 through 17.5, ensure that you upgrade to at least + PostgreSQL 17.6 before attempting a major upgrade, or make sure to temporarily set + the `max_slot_wal_keep_size` parameter to `-1` in your cluster configuration. + +You can trigger the upgrade in one of two ways: + +- By updating the major version in the image tag via the `.spec.imageName` + option. +- Using an [image catalog](image_catalog.md) to manage version changes. + +For details on supported image tags, see +["Image Tag Requirements"](container_images.md#image-tag-requirements). + +!!! Warning + CloudNativePG is not responsible for PostgreSQL extensions. You must ensure + that extensions in the source PostgreSQL image are compatible with those in the + target image and that upgrade paths are supported. Thoroughly test the upgrade + process in advance to avoid unexpected issues. + The [extensions management feature](declarative_database_management.md#managing-extensions-in-a-database) + can help manage extension upgrades declaratively. + +### Upgrade Process + +1. Shuts down all cluster pods to ensure data consistency. +2. Records the previous PostgreSQL version and image in the cluster’s status under + `.status.pgDataImageInfo`. +3. Initiates a new upgrade job, which: + - Verifies that the binaries in the image and the data files align with a + major upgrade request. + - Creates new directories for `PGDATA`, and where applicable, WAL files and + tablespaces. + - Performs the upgrade using `pg_upgrade` with the `--link` option. + - Upon successful completion, replaces the original directories with their + upgraded counterparts. + +!!! Warning + During the upgrade process, the entire PostgreSQL cluster, including + replicas, is unavailable to applications. Ensure that your system can + tolerate this downtime before proceeding. + +!!! Warning + Performing an in-place upgrade is an exceptional operation that carries inherent + risks. It is strongly recommended to take a full backup of the cluster before + initiating the upgrade process. + +!!! 
Info
+    For detailed guidance on `pg_upgrade`, refer to the official
+    [PostgreSQL documentation](https://www.postgresql.org/docs/current/pgupgrade.html).
+
+### Post-Upgrade Actions
+
+If the upgrade is successful, CloudNativePG:
+
+- Destroys the PVCs of replicas (if any).
+- Scales up replicas as required.
+
+!!! Warning
+    Re-cloning replicas can be time-consuming, especially for very large
+    databases. Plan accordingly to accommodate potential delays. After completing
+    the upgrade, it is strongly recommended to take a full backup. Existing backup
+    data (namely base backups and WAL files) is only usable with the previous
+    major PostgreSQL release.
+
+!!! Warning
+    `pg_upgrade` doesn't transfer optimizer statistics. After the upgrade, you
+    may want to run `ANALYZE` on your databases to update them.
+
+If the upgrade fails, you must manually revert the major version change in the
+cluster's configuration and delete the upgrade job, as CloudNativePG cannot
+make the rollback decision automatically.
+
+!!! Important
+    This process **protects your existing database from data loss**, as no data
+    is modified during the upgrade. If the upgrade fails, a rollback is
+    usually possible, without having to perform a full recovery from a backup.
+    Ensure you monitor the process closely and take corrective action if needed.
+
+### Example: Performing a Major Upgrade
+
+Consider the following PostgreSQL cluster running version 16:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  imageName: ghcr.io/cloudnative-pg/postgresql:16-minimal-bookworm
+  instances: 3
+  storage:
+    size: 1Gi
+```
+
+You can check the current PostgreSQL version using the following command:
+
+```sh
+kubectl cnpg psql cluster-example -- -qAt -c 'SELECT version()'
+```
+
+This will return output similar to:
+
+```console
+PostgreSQL 16.x ...
+```
+
+To upgrade the cluster to version 17, update the `imageName` field by changing
+the major version tag from `16` to `17`:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  imageName: ghcr.io/cloudnative-pg/postgresql:17-minimal-bookworm
+  instances: 3
+  storage:
+    size: 1Gi
+```
+
+#### Upgrade Process
+
+1. Cluster shutdown – All cluster pods are terminated to ensure a consistent
+   upgrade.
+2. Upgrade job execution – A new job is created with the name of the primary
+   pod, appended with the suffix `-major-upgrade`. This job runs `pg_upgrade`
+   on the primary’s persistent volume group.
+3. Post-upgrade steps:
+   - The PVC groups of the replicas (`cluster-example-2` and
+     `cluster-example-3`) are removed.
+   - The primary pod is restarted.
+   - Two new replicas (`cluster-example-4` and `cluster-example-5`) are
+     re-cloned from the upgraded primary.
+
+Once the upgrade is complete, you can verify the new major version by running
+the same command:
+
+```sh
+kubectl cnpg psql cluster-example -- -qAt -c 'SELECT version()'
+```
+
+This should now return output similar to:
+
+```console
+PostgreSQL 17.x ...
+``` + +You can now update the statistics by running `ANALYZE` on the `app` database: + +```sh +kubectl cnpg psql cluster-example -- app -c 'ANALYZE' +``` diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index c85573a4d0..368815cac4 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -1,4 +1,5 @@ # PostgreSQL Configuration + Users that are familiar with PostgreSQL are aware of the existence of the following three files to configure an instance: @@ -64,8 +65,9 @@ operator by applying the following sections in this order: The **global default parameters** are: ```text -archive_mode = 'on' +archive_timeout = '5min' dynamic_shared_memory_type = 'posix' +full_page_writes = 'on' logging_collector = 'on' log_destination = 'csvlog' log_directory = '/controller/log' @@ -76,9 +78,11 @@ log_truncate_on_rotation = 'false' max_parallel_workers = '32' max_replication_slots = '32' max_worker_processes = '32' -shared_memory_type = 'mmap' # for PostgreSQL >= 12 only -wal_keep_size = '512MB' # for PostgreSQL >= 13 only -wal_keep_segments = '32' # for PostgreSQL <= 12 only +shared_memory_type = 'mmap' +shared_preload_libraries = '' +ssl_max_protocol_version = 'TLSv1.3' +ssl_min_protocol_version = 'TLSv1.3' +wal_keep_size = '512MB' wal_level = 'logical' wal_log_hints = 'on' wal_sender_timeout = '5s' @@ -111,7 +115,6 @@ The following parameters are **fixed** and exclusively controlled by the operato ```text archive_command = '/controller/manager wal-archive %p' -full_page_writes = 'on' hot_standby = 'true' listen_addresses = '*' port = '5432' @@ -127,17 +130,52 @@ Since the fixed parameters are added at the end, they can't be overridden by the user via the YAML configuration. Those parameters are required for correct WAL archiving and replication. -### Replication settings +### Write-Ahead Log Level + +The [`wal_level`](https://www.postgresql.org/docs/current/runtime-config-wal.html) +parameter in PostgreSQL determines the amount of information written to the +Write-Ahead Log (WAL). It accepts the following values: + +- `minimal`: Writes only the information required for crash recovery. +- `replica`: Adds sufficient information to support WAL archiving and streaming + replication, including the ability to run read-only queries on standby + instances. +- `logical`: Includes all information from `replica`, plus additional information + required for logical decoding and replication. + +By default, upstream PostgreSQL sets `wal_level` to `replica`. CloudNativePG, +instead, sets `wal_level` to `logical` by default to enable logical replication +out of the box. This makes it easier to support use cases such as migrations +from external PostgreSQL servers. + +If your cluster does not require logical replication, it is recommended to set +`wal_level` to `replica` to reduce WAL volume and overhead. + +Finally, CloudNativePG allows `wal_level` to be set to `minimal` only for +single-instance clusters with WAL archiving disabled. -The `primary_conninfo`, `restore_command`, and `recovery_target_timeline` -parameters are managed automatically by the operator according to the state of -the instance in the cluster. +### Replication Settings + +The `primary_conninfo`, `restore_command`, and `recovery_target_timeline` +parameters are automatically managed by the operator based on the instance's +role within the cluster. These parameters are effectively applied only when the +instance is operating as a replica. 
```text -primary_conninfo = 'host=cluster-example-rw user=postgres dbname=postgres' +primary_conninfo = 'host= user=postgres dbname=postgres' recovery_target_timeline = 'latest' ``` +The [`STANDBY_TCP_USER_TIMEOUT` operator configuration setting](operator_conf.md#available-options), +if specified, sets the `tcp_user_timeout` parameter on all standby instances +managed by the operator. + +The `tcp_user_timeout` parameter determines how long transmitted data can +remain unacknowledged before the TCP connection is forcibly closed. Adjusting +this value allows you to fine-tune the responsiveness of standby instances to +network disruptions. For more details, refer to the +[PostgreSQL documentation](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-TCP-USER-TIMEOUT). + ### Log control settings The operator requires PostgreSQL to output its log in CSV format, and the @@ -212,6 +250,12 @@ SELECT datname FROM pg_database WHERE datallowconn !!! Note The above query also includes template databases like `template1`. +!!! Important + With the introduction of [declarative extensions](declarative_database_management.md#managing-extensions-in-a-database) + in the `Database` CRD, you can now manage extensions directly. As a result, + the managed extensions feature may undergo significant changes in future + versions of CloudNativePG, and some functionalities might be deprecated. + #### Enabling `auto_explain` The [`auto_explain`](https://www.postgresql.org/docs/current/auto-explain.html) @@ -329,8 +373,9 @@ Fixed rules: ```text local all all peer -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert +hostssl postgres streaming_replica all cert map=cnpg_streaming_replica +hostssl replication streaming_replica all cert map=cnpg_streaming_replica +hostssl all cnpg_pooler_pgbouncer all cert map=cnpg_pooler_pgbouncer ``` Default rules: @@ -352,8 +397,9 @@ The resulting `pg_hba.conf` will look like this: ```text local all all peer -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert +hostssl postgres streaming_replica all cert map=cnpg_streaming_replica +hostssl replication streaming_replica all cert map=cnpg_streaming_replica +hostssl all cnpg_pooler_pgbouncer all cert map=cnpg_pooler_pgbouncer @@ -592,7 +638,6 @@ Users are not allowed to set the following configuration parameters in the - `data_sync_retry` - `event_source` - `external_pid_file` -- `full_page_writes` - `hba_file` - `hot_standby` - `ident_file` @@ -642,4 +687,3 @@ Users are not allowed to set the following configuration parameters in the - `unix_socket_directories` - `unix_socket_group` - `unix_socket_permissions` - diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index 1331deff22..537fc7651b 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -1,4 +1,5 @@ # Preview Versions + CloudNativePG candidate releases are pre-release versions made available for testing before the community issues a new generally available (GA) release. @@ -36,11 +37,10 @@ are not backwards compatible and could be removed entirely. There are currently no preview versions available. 
- diff --git a/docs/src/quickstart.md b/docs/src/quickstart.md index a0b7b55e6c..a076829b17 100644 --- a/docs/src/quickstart.md +++ b/docs/src/quickstart.md @@ -1,4 +1,5 @@ # Quickstart + This section guides you through testing a PostgreSQL cluster on your local machine by deploying CloudNativePG on a local Kubernetes cluster @@ -165,7 +166,7 @@ In this section we show how to deploy Prometheus and Grafana for observability, and how to create a Grafana Dashboard to monitor CloudNativePG clusters, and a set of Prometheus Rules defining alert conditions. -We leverage the [Kube-Prometheus stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), +We leverage the [Kube-Prometheus stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart, which is maintained by the [Prometheus Community](https://github.com/prometheus-community). Please refer to the project website for additional documentation and background. @@ -183,7 +184,8 @@ If you don't have [Helm](https://helm.sh) installed yet, please follow the system. We need to add the `prometheus-community` helm chart repository, and then -install the *Kube Prometheus stack* using the sample configuration we provide: +install the *Kube Prometheus stack* with our sample configuration +[`kube-stack-config.yaml`](./samples/monitoring/kube-stack-config.yaml). We can accomplish this with the following commands: @@ -197,16 +199,17 @@ helm upgrade --install \ prometheus-community/kube-prometheus-stack ``` -After completion, you will have Prometheus, Grafana and Alert Manager installed with values from the -`kube-stack-config.yaml` file: +After completion, you will have Prometheus, Grafana, and Alert Manager, +configured with the `kube-stack-config.yaml` file: -- From the Prometheus installation, you will have the Prometheus Operator watching for **any** `PodMonitor` - (see [*monitoring*](monitoring.md)). -- The Grafana installation will be watching for a Grafana dashboard `ConfigMap`. +- From the Prometheus installation, you will have the Prometheus Operator + watching for **any** `PodMonitor` (see [*monitoring*](monitoring.md)). +- Alert Manager and Grafana are both enabled. !!! Seealso - For further information about the above command, refer to the [helm install](https://helm.sh/docs/helm/helm_install/) - documentation. + For further information about the above helm commands, refer to the [helm + install](https://helm.sh/docs/helm/helm_install/) + documentation. You can see several Custom Resources have been created: @@ -235,7 +238,7 @@ prometheus-community-kube-prometheus ClusterIP 9090/TCP ### Viewing with Prometheus -At this point, a CloudNativePG cluster deployed with Monitoring activated +At this point, a CloudNativePG cluster deployed with monitoring activated would be observable via Prometheus. For example, you could deploy a simple cluster with `PodMonitor` enabled: @@ -266,13 +269,15 @@ kubectl port-forward svc/prometheus-community-kube-prometheus 9090 Then access the Prometheus console locally at: [`http://localhost:9090/`](http://localhost:9090/) -Assuming that the monitoring stack was successfully deployed, and you have a Cluster with `enablePodMonitor: true`, -you should find a series of metrics relating to CloudNativePG clusters. Again, please -refer to the [*monitoring section*](monitoring.md) for more information. +You should find a series of metrics relating to CloudNativePG clusters. 
+Please refer to the [monitoring section](monitoring.md) for more information. ![local prometheus](images/prometheus-local.png) -You can now define some alerts by creating a `prometheusRule`: +You can also monitor the CloudNativePG operator by creating a PodMonitor to +target it. See the relevant section in the [monitoring page](monitoring.md#monitoring-the-operator-with-prometheus). + +You can define some alerts by creating a `prometheusRule`: ``` sh kubectl apply -f \ @@ -292,28 +297,32 @@ we just installed. ### Grafana Dashboard -In our "plain" installation, Grafana is deployed with no predefined dashboards. +In our installation so far, Grafana is deployed with no predefined dashboards. -You can port-forward: +To open Grafana, you can port-forward the grafana service: ``` sh kubectl port-forward svc/prometheus-community-grafana 3000:80 ``` -And access Grafana locally at [`http://localhost:3000/`](http://localhost:3000/) -providing the credentials `admin` as username, `prom-operator` as password (defined in `kube-stack-config.yaml`). +and access Grafana locally at [`http://localhost:3000/`](http://localhost:3000/) +providing the credentials `admin` as username, `prom-operator` as password +(defined in `kube-stack-config.yaml`). -CloudNativePG provides a default dashboard for Grafana as part of the official -[Helm chart](https://github.com/cloudnative-pg/charts). You can also download the +CloudNativePG provides a default dashboard for Grafana in the dedicated +[`grafana-dashboards` repository](https://github.com/cloudnative-pg/grafana-dashboards). +You can download the file [grafana-dashboard.json](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json) -file and manually importing it via the GUI. +and manually import it via the GUI (menu: Dashboards > New > Import). +You can now click on the `CloudNativePG` dashboard just created: + +![local grafana](images/grafana-local.png) !!! Warning Some graphs in the previous dashboard make use of metrics that are in alpha stage by the time this was created, like `kubelet_volume_stats_available_bytes` and `kubelet_volume_stats_capacity_bytes` producing some graphs to show `No data`. -![local grafana](images/grafana-local.png) - -Note that in our local setup, Prometheus and Grafana are configured to automatically discover -and monitor any CloudNativePG clusters deployed with the Monitoring feature enabled. +Note that in our local setup, Prometheus and Grafana are configured to +automatically discover and monitor any CloudNativePG clusters deployed with the +Monitoring feature enabled. diff --git a/docs/src/recovery.md b/docs/src/recovery.md index 1e0a9b931a..85779fd5df 100644 --- a/docs/src/recovery.md +++ b/docs/src/recovery.md @@ -1,61 +1,86 @@ # Recovery + -In PostgreSQL terminology, recovery is the process of starting a PostgreSQL -instance using an existing backup. The PostgreSQL recovery mechanism -is very solid and rich. It also supports point-in-time recovery (PITR), which allows -you to restore a given cluster up to any point in time, from the first available -backup in your catalog to the last archived WAL. (The WAL -archive is mandatory in this case.) +In PostgreSQL, **recovery** refers to the process of starting an instance from +an existing physical backup. PostgreSQL's recovery system is robust and +feature-rich, supporting **Point-In-Time Recovery (PITR)**—the ability to +restore a cluster to any specific moment, from the earliest available backup to +the latest archived WAL file. 
-In CloudNativePG, you can't perform recovery in place on an existing -cluster. Recovery is instead a way to bootstrap a new Postgres cluster -starting from an available physical backup. +!!! Important + A valid WAL archive is required to perform PITR. + +In CloudNativePG, recovery is **not performed in-place** on an existing +cluster. Instead, it is used to **bootstrap a new cluster** from a physical +backup. !!! Note - For details on the `bootstrap` stanza, see + For more details on configuring the `bootstrap` stanza, refer to [Bootstrap](bootstrap.md). -The `recovery` bootstrap mode lets you create a cluster from an existing -physical base backup. You then reapply the WAL files containing the REDO log -from the archive. +The `recovery` bootstrap mode allows you to initialize a cluster from a +physical base backup and replay the associated WAL files to bring the system to +a consistent and optionally point-in-time state. -WAL files are pulled from the defined *recovery object store*. +CloudNativePG supports recovery via: -Base backups can be taken either on object stores or using volume snapshots. +- A **pluggable backup and recovery interface (CNPG-I)**, enabling integration + with external tools such as the [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/). +- **Native recovery from volume snapshots**, where supported by the underlying + Kubernetes storage infrastructure. +- **Native recovery from object stores via Barman Cloud**, which is + **deprecated** as of version 1.26 in favor of the plugin-based approach. -You can achieve recovery from a *recovery object store* in two ways: +With the deprecation of native Barman Cloud support in version 1.26, this +section now focuses on two supported recovery methods: using the **Barman Cloud +Plugin** for recovery from object stores, and the **native interface** for +recovery from volume snapshots. -- We recommend using a recovery object store, that is, a backup of another cluster - created by Barman Cloud and defined by way of the `barmanObjectStore` option - in the `externalClusters` section. -- Alternatively, you can use an existing `Backup` object in the same namespace. +!!! Important + For legacy documentation, see + [Appendix B – Recovery from an Object Store](appendixes/backup_barmanobjectstore.md#recovery-from-an-object-store). -Both recovery methods enable either full recovery (up to the last -available WAL) or up to a [point in time](#point-in-time-recovery-pitr). -When performing a full recovery, you can also start the cluster -in replica mode (see [replica clusters](replica_cluster.md) for reference). +## Recovery from an Object Store with the Barman Cloud Plugin -!!! Important - If using replica mode, make sure that the PostgreSQL configuration - (`.spec.postgresql.parameters`) of the recovered cluster is compatible with - the original one from a physical replication standpoint. +This section outlines how to recover a PostgreSQL cluster from an object store +using the recommended Barman Cloud Plugin. -For recovery using *volume snapshots*: +!!! Important + The object store must contain backup data produced by a CloudNativePG + `Cluster`—either using the **deprecated native Barman Cloud integration** or + the **Barman Cloud Plugin**. -- Use a consistent set of `VolumeSnapshot` objects that all belong to the - same backup and are identified by the same `cnpg.io/cluster` and - `cnpg.io/backupName` labels. 
Then, recover through the `volumeSnapshots` - option in the `.spec.bootstrap.recovery` stanza, as described in - [Recovery from `VolumeSnapshot` objects](#recovery-from-volumesnapshot-objects). +!!! Info + For full details, refer to the + [“Recovery of a Postgres Cluster” section in the Barman Cloud Plugin documentation](https://cloudnative-pg.io/plugin-barman-cloud/docs/concepts/#recovery-of-a-postgres-cluster). -## Recovery from an object store +Begin by defining the object store that holds both your base backups and WAL +files. The Barman Cloud Plugin uses a custom `ObjectStore` resource for this +purpose. The following example shows how to configure one for Azure Blob +Storage: -You can recover from a backup created by Barman Cloud and stored on a supported -object store. After you define the external cluster, including all the required -configuration in the `barmanObjectStore` section, you need to reference it in -the `.spec.recovery.source` option. +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: cluster-example-backup +spec: + configuration: + destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ + azureCredentials: + storageAccount: + name: recovery-object-store-secret + key: storage_account_name + storageKey: + name: recovery-object-store-secret + key: storage_account_key + wal: + maxParallel: 8 +``` -This example defines a recovery object store in a blob container in Azure: +Next, configure the `Cluster` resource to use the `ObjectStore` you defined. In +the `bootstrap` section, specify the recovery source, and define an +`externalCluster` entry that references the plugin: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -64,61 +89,45 @@ metadata: name: cluster-restore spec: [...] - + superuserSecret: name: superuser-secret bootstrap: recovery: - source: clusterBackup + source: origin externalClusters: - - name: clusterBackup - barmanObjectStore: - destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ - azureCredentials: - storageAccount: - name: recovery-object-store-secret - key: storage_account_name - storageKey: - name: recovery-object-store-secret - key: storage_account_key - wal: - maxParallel: 8 + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example-backup + serverName: cluster-example ``` -The previous example assumes that the application database and its owning user -are named `app` by default. If the PostgreSQL cluster being restored uses -different names, you must specify these names before exiting the recovery phase, -as documented in ["Configure the application database"](#configure-the-application-database). - -!!! Important - By default, the `recovery` method strictly uses the `name` of the - cluster in the `externalClusters` section as the name of the main folder - of the backup data within the object store. This name is normally reserved - for the name of the server. You can specify a different folder name - using the `barmanObjectStore.serverName` property. - -!!! Note - This example takes advantage of the parallel WAL restore feature, - dedicating up to 8 jobs to concurrently fetch the required WAL files from the - archive. This feature can appreciably reduce the recovery time. Make sure that - you plan ahead for this scenario and correctly tune the value of this parameter - for your environment. It will make a difference when you need it, and you will. 
- -## Recovery from `VolumeSnapshot` objects +## Recovery from `VolumeSnapshot` Objects !!! Warning - When creating replicas after recovering the primary instance from - the volume snapshot, the operator might end up using `pg_basebackup` - to synchronize them. This behavior results in a slower process, depending - on the size of the database. This limitation will be lifted in the future when - support for online backups and PVC cloning are introduced. - -CloudNativePG can create a new cluster from a `VolumeSnapshot` of a PVC of an -existing `Cluster` that's been taken using the declarative API for [volume -snapshot backups](backup_volumesnapshot.md). You must specify the name of the -snapshot, as in the following example: + When creating replicas after recovering a primary instance from a + `VolumeSnapshot`, the operator may fall back to using `pg_basebackup` to + synchronize them. This process can be significantly slower—especially for large + databases—because it involves a full base backup. This limitation will be + addressed in the future with support for online backups and PVC cloning in + the scale-up process. + +CloudNativePG allows you to create a new cluster from a `VolumeSnapshot` of a +`PersistentVolumeClaim` (PVC) that belongs to an existing `Cluster`. +These snapshots are created using the declarative API for +[volume snapshot backups](appendixes/backup_volumesnapshot.md). + +To complete the recovery process, the new cluster must also reference an +external cluster that provides access to the WAL archive needed to reapply +changes and finalize the recovery. + +The following example shows a cluster being recovered using both a +`VolumeSnapshot` for the base backup and a WAL archive accessed through the +Barman Cloud Plugin: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -130,11 +139,20 @@ spec: bootstrap: recovery: + source: origin volumeSnapshots: storage: name: kind: VolumeSnapshot apiGroup: snapshot.storage.k8s.io + + externalClusters: + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example-backup + serverName: cluster-example ``` In case the backed-up cluster was using a separate PVC to store the WAL files, @@ -248,9 +266,9 @@ feature to work if you specify a recovery target. ### PITR from an object store -This example uses a recovery object store in Azure that contains both -the base backups and the WAL archive. The recovery target is based on a -requested timestamp. +This example uses the same recovery object store in Azure defined earlier for +the Barman Cloud plugin, containing both the base backups and the WAL archive. +The recovery target is based on a requested timestamp. 
```yaml apiVersion: postgresql.cnpg.io/v1 @@ -266,24 +284,18 @@ spec: bootstrap: recovery: # Recovery object store containing WAL archive and base backups - source: clusterBackup + source: origin recoveryTarget: # Time base target for the recovery targetTime: "2023-08-11 11:14:21.00000+02" externalClusters: - - name: clusterBackup - barmanObjectStore: - destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ - azureCredentials: - storageAccount: - name: recovery-object-store-secret - key: storage_account_name - storageKey: - name: recovery-object-store-secret - key: storage_account_key - wal: - maxParallel: 8 + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example-backup + serverName: cluster-example ``` In this example, you had to specify only the `targetTime` in the form of a @@ -308,17 +320,20 @@ the recovery as follows: - Otherwise, the operator selects the last available backup, in chronological order. -### PITR from `VolumeSnapshot` objects +### Point-in-Time Recovery (PITR) from `VolumeSnapshot` Objects -The example that follows uses: +The following example demonstrates how to perform a **Point-in-Time Recovery (PITR)** using: -- A Kubernetes volume snapshot for the `PGDATA` containing the base backup from - which to start the recovery process. This snapshot is identified in the - `recovery.volumeSnapshots` section and called `test-snapshot-1`. -- A recovery object store in MinIO containing the WAL archive. The object store is identified by - the `recovery.source` option in the form of an external cluster definition. +- A Kubernetes `VolumeSnapshot` of the `PGDATA` directory, which provides the + base backup. This snapshot is specified in the `recovery.volumeSnapshots` + section and is named `test-snapshot-1`. +- A recovery object store (in this case, MinIO) containing the archived WAL + files. The object store is defined via a Barman Cloud Plugin `ObjectStore` + resource (not shown here), and referenced using the `recovery.source` field, + which points to an external cluster configuration. -The recovery target is based on a requested timestamp. +The cluster will be restored to a specific point in time using the +`recoveryTarget.targetTime` option. ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -329,7 +344,7 @@ spec: # ... bootstrap: recovery: - source: cluster-example-with-backup + source: origin volumeSnapshots: storage: name: test-snapshot-1 @@ -338,19 +353,18 @@ spec: recoveryTarget: targetTime: "2023-07-06T08:00:39" externalClusters: - - name: cluster-example-with-backup - barmanObjectStore: - destinationPath: s3://backups/ - endpointURL: http://minio:9000 - s3Credentials: - accessKeyId: - name: minio - key: ACCESS_KEY_ID - secretAccessKey: - name: minio - key: ACCESS_SECRET_KEY + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: minio-backup + serverName: cluster-example ``` +This setup enables CloudNativePG to restore the base data from a volume +snapshot and apply WAL segments from the object store to reach the desired +recovery target. + !!! Note If the backed-up cluster had `walStorage` enabled, you also must specify the volume snapshot containing the `PGWAL` directory, as mentioned in @@ -360,6 +374,12 @@ spec: It's your responsibility to ensure that the end time of the base backup in the volume snapshot is before the recovery target timestamp. +!!! 
Warning + If you added or removed a [tablespace](tablespaces.md) in your cluster + since the last base backup, replaying the WAL will fail. You need a base + backup between the time of the tablespace change and the recovery target + timestamp. + ### Recovery targets Here are the recovery target criteria you can use: @@ -369,6 +389,11 @@ targetTime [RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339) format. (The precise stopping point is also influenced by the `exclusive` option.) +!!! Warning + PostgreSQL recovery will stop when it encounters the first transaction that + occurs after the specified time. If no such transaction exists after the + target time, the recovery process will fail. + targetXID : Transaction ID up to which recovery proceeds. (The precise stopping point is also influenced by the `exclusive` option.) @@ -404,7 +429,7 @@ kind: Cluster [...] bootstrap: recovery: - source: clusterBackup + source: origin recoveryTarget: backupID: 20220616T142236 targetName: 'restore_point_1' @@ -439,25 +464,19 @@ spec: bootstrap: recovery: - source: clusterBackup + source: origin recoveryTarget: backupID: 20220616T142236 targetName: "maintenance-activity" exclusive: true externalClusters: - - name: clusterBackup - barmanObjectStore: - destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ - azureCredentials: - storageAccount: - name: recovery-object-store-secret - key: storage_account_name - storageKey: - name: recovery-object-store-secret - key: storage_account_key - wal: - maxParallel: 8 + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example-backup + serverName: cluster-example ``` ## Configure the application database @@ -539,10 +558,10 @@ When the base backup recovery process is complete, the operator starts the Postgres instance in recovery mode. In this phase, PostgreSQL is up, though not able to accept connections, and the pod is healthy according to the liveness probe. By way of the `restore_command`, PostgreSQL starts fetching WAL -files from the archive. (You can speed up this phase by setting the -`maxParallel` option and enabling the parallel WAL restore capability.) +files from the archive. You can speed up this phase by setting the +`maxParallel` option and enabling the parallel WAL restore capability. -This phase terminates when PostgreSQL reaches the target (either the end of the +This phase terminates when PostgreSQL reaches the target, either the end of the WAL or the required target in case of PITR. You can optionally specify a `recoveryTarget` to perform a PITR. If left unspecified, the recovery continues up to the latest available WAL on the default target timeline (`latest`). @@ -554,57 +573,28 @@ remaining instances join the cluster as replicas. The process is transparent for the user and is managed by the instance manager running in the pods. -## Restoring into a cluster with a backup section - - - -A manifest for a cluster restore might include a `backup` section. This means -that,after recovery, the new cluster starts archiving WALs and taking backups -if configured to do so. - -For example, this section is part of a manifest for a cluster bootstrapping -from the cluster `cluster-example-backup`. In the storage bucket, it creates a -folder named `recoveredCluster`, where the base backups and WALs of the -recovered cluster are stored. 
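+
+As noted above, when no `recoveryTarget` is specified, recovery follows the
+latest timeline up to the end of the archived WAL. As a minimal sketch, you
+can make this default explicit through the `targetTLI` option of the
+`recoveryTarget` stanza, reusing the `origin` source defined earlier:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-restore
+spec:
+  [...]
+  bootstrap:
+    recovery:
+      source: origin
+      recoveryTarget:
+        # Follow the latest timeline up to the end of the WAL archive;
+        # this matches the behavior when no recovery target is set
+        targetTLI: latest
+```
+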
- 
-``` yaml
-  backup:
-    barmanObjectStore:
-      destinationPath: s3://backups/
-      endpointURL: http://minio:9000
-      serverName: "recoveredCluster"
-      s3Credentials:
-        accessKeyId:
-          name: minio
-          key: ACCESS_KEY_ID
-        secretAccessKey:
-          name: minio
-          key: ACCESS_SECRET_KEY
-    retentionPolicy: "30d"
+## Restoring into a Cluster with a Backup Section
 
-  externalClusters:
-    - name: cluster-example-backup
-      barmanObjectStore:
-        destinationPath: s3://backups/
-        endpointURL: http://minio:9000
-        s3Credentials:
-```
+When restoring a cluster, the manifest may include a `plugins` section with
+the Barman Cloud plugin pointing to a *backup* `ObjectStore` resource. This
+enables the newly created cluster to begin archiving WAL files and taking
+backups immediately after recovery, provided backup policies are configured.
 
-Don't reuse the same `barmanObjectStore` configuration for different clusters.
-There might be cases where the existing information in the storage buckets
-could be overwritten by the new cluster.
+Avoid reusing the same `ObjectStore` configuration for both *backup* and
+*recovery* in the same cluster. If you must, ensure that each cluster archives
+under a unique `serverName` to prevent accidental overwrites of backup or WAL
+archive data.
 
 !!! Warning
-    The operator includes a safety check to ensure a cluster doesn't overwrite
-    a storage bucket that contained information. A cluster that would overwrite
-    existing storage remains in the state `Setting up primary` with pods in an
-    error state. The pod logs show: `ERROR: WAL archive check failed for server
-    recoveredCluster: Expected empty archive`.
+    CloudNativePG includes a safety check to prevent a cluster from overwriting
+    existing data in a shared storage bucket. If a conflict is detected, the
+    cluster remains in the `Setting up primary` state, and the associated pods will
+    fail with an error. The pod logs will display:
+    `ERROR: WAL archive check failed for server recoveredCluster: Expected empty archive`.
 
 !!! Important
-    If you set the `cnpg.io/skipEmptyWalArchiveCheck` annotation to `enabled`
-    in the recovered cluster, you can skip the safety check. We don't recommend
-    skipping the check because, for the general use case, the check works fine.
-    Skip this check only if you're familiar with the PostgreSQL recovery system, as
-    severe data loss can occur.
-
+    You can bypass this safety check by setting the
+    `cnpg.io/skipEmptyWalArchiveCheck` annotation to `enabled` on the recovered
+    cluster. However, this is strongly discouraged unless you are deeply
+    familiar with PostgreSQL's recovery process, as skipping the check can
+    lead to severe data loss. Use it with caution and only in expert scenarios.
diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md
index f10bd1cd6d..23b707663f 100644
--- a/docs/src/release_notes.md
+++ b/docs/src/release_notes.md
@@ -1,15 +1,20 @@
 # Release notes
+
 History of user-visible changes for CloudNativePG, classified for each minor
 release.
+
-- [CloudNativePG 1.24](release_notes/v1.24.md)
-- [CloudNativePG 1.23](release_notes/v1.23.md)
+
+- [CloudNativePG 1.27](release_notes/v1.27.md)
+- [CloudNativePG 1.26](release_notes/v1.26.md)
+- [CloudNativePG 1.25](release_notes/v1.25.md)
 
 For information on the community support policy for CloudNativePG, please
 refer to ["Supported releases"](supported_releases.md).
 
Older releases: +- [CloudNativePG 1.24](release_notes/old/v1.24.md) +- [CloudNativePG 1.23](release_notes/old/v1.23.md) - [CloudNativePG 1.22](release_notes/old/v1.22.md) - [CloudNativePG 1.21](release_notes/old/v1.21.md) - [CloudNativePG 1.20](release_notes/old/v1.20.md) diff --git a/docs/src/release_notes/edb-cloud-native-postgresql.md b/docs/src/release_notes/edb-cloud-native-postgresql.md index 06f5ef6d7e..26fded50ae 100644 --- a/docs/src/release_notes/edb-cloud-native-postgresql.md +++ b/docs/src/release_notes/edb-cloud-native-postgresql.md @@ -1,4 +1,5 @@ # Release notes for 1.14.0 and earlier + The first public release of CloudNativePG is version 1.15.0. Before that, the product was entirely owned by EDB and distributed under the name of diff --git a/docs/src/release_notes/v1.23.md b/docs/src/release_notes/old/v1.23.md similarity index 67% rename from docs/src/release_notes/v1.23.md rename to docs/src/release_notes/old/v1.23.md index 197a81a93e..1247a53955 100644 --- a/docs/src/release_notes/v1.23.md +++ b/docs/src/release_notes/old/v1.23.md @@ -6,6 +6,103 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.23) on the release branch in GitHub. +## Version 1.23.6 + +**Release Date:** December 23, 2024 + +!!! Warning + This is the final release in the 1.23.x series. + Users are strongly encouraged to upgrade to a newer minor version, as 1.23 + is no longer supported. + +### Enhancements + +- Enable customization of startup, liveness, and readiness probes through the + `.spec.probes` stanza. (#6266) +- Add the `cnpg.io/userType` label to secrets generated for predefined users, + specifically `superuser` and `app`. (#4392) +- Improved validation for the `spec.schedule` field in ScheduledBackups, + raising warnings for potential misconfigurations. (#5396) +- `cnpg` plugin: + - Honor the `User-Agent` header in HTTP requests with the API server. (#6153) + +### Bug Fixes + +- Ensure the former primary flushes its WAL file queue to the archive before + re-synchronizing as a replica, reducing recovery times and enhancing data + consistency during failovers. (#6141) +- Clean the WAL volume along with the `PGDATA` volume during bootstrap. (#6265) +- Update the operator to set the cluster phase to `Unrecoverable` when + all previously generated `PersistentVolumeClaims` are missing. (#6170) +- Fix the parsing of the `synchronous_standby_names` GUC when + `.spec.postgresql.synchronous.method` is set to `first`. (#5955) +- Resolved a potential race condition when patching certain conditions + in CRD statuses, improving reliability in concurrent updates. (#6328) +- Correct role changes to apply at the transaction level instead of the + database context. (#6064) +- Remove the `primary_slot_name` definition from the `override.conf` file on + the primary to ensure it is always empty. (#6219) +- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods + to enable seamless access to the `pgbouncer` virtual database using `psql` + from within the container. (#6247) +- Remove unnecessary updates to the Cluster status when verifying changes in + the image catalog. (#6277) +- Prevent panic during recovery from an external server without proper backup + configuration. (#6300) +- Resolved a key collision issue in structured logs, where the name field was + inconsistently used to log two distinct values. 
(#6324) +- Ensure proper quoting of the inRoles field in SQL statements to prevent + syntax errors in generated SQL during role management. (#6346) +- `cnpg` plugin: + - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257) + - Avoid displaying physical backups block when empty with `status` command. (#5998) + +## Version 1.23.5 + +**Release date:** Oct 16, 2024 + +### Enhancements: + +- Remove the use of `pg_database_size` from the status probe, as it caused + high resource utilization by scanning the entire `PGDATA` directory to + compute database sizes. The `kubectl status` plugin will now rely on `du` + to provide detailed size information retrieval (#5689). +- Add the ability to configure the `full_page_writes` parameter in + PostgreSQL. This setting defaults to `on`, in line with PostgreSQL's + recommendations (#5516). +- Plugin: + - Add the `logs pretty` command in the `cnpg` plugin to read a log stream + from standard input and output a human-readable format, with options to + filter log entries (#5770) + - Enhance the `status` command by allowing multiple `-v` options to + increase verbosity for more detailed output (#5765). + - Add support for specifying a custom Docker image using the `--image` + flag in the `pgadmin4` plugin command, giving users control over the + Docker image used for pgAdmin4 deployments (#5515). + +### Fixes: + +- Resolve an issue with concurrent status updates when demoting a primary to a + designated primary, ensuring smoother transitions during cluster role changes + (#5755). +- Ensure that replica PodDisruptionBudgets (PDB) are removed when scaling down + to two instances, enabling easier maintenance on the node hosting the replica + (#5487). +- Prioritize full rollout over inplace restarts (#5407). +- Fix an issue that could lead to double failover in cases of lost + connectivity (#5788). +- Correctly set the `TMPDIR` and `PSQL_HISTORY` environment variables for pods + and jobs, improving temporary file and history management (#5503). +- Plugin: + - Resolve a race condition in the `logs cluster` command (#5775). + - Display the `potential` sync status in the `status` plugin (#5533). + - Fix the issue where pods deployed by the `pgadmin4` command didn’t have + a writable home directory (#5800). + +### Supported versions + +- PostgreSQL 17 (PostgreSQL 17.0 is the default image) + ## Version 1.23.4 **Release date:** Aug 22, 2024 diff --git a/docs/src/release_notes/old/v1.24.md b/docs/src/release_notes/old/v1.24.md new file mode 100644 index 0000000000..37518b0e99 --- /dev/null +++ b/docs/src/release_notes/old/v1.24.md @@ -0,0 +1,410 @@ +# Release notes for CloudNativePG 1.24 + + +History of user-visible changes in the 1.24 minor release of CloudNativePG. + +For a complete list of changes, please refer to the +[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.24) +on the release branch in GitHub. + + +## Version 1.24.4 + +**Release date:** May 23, 2025 + +!!! Warning + This is the final release in the 1.24.x series. + Users are strongly encouraged to upgrade to a newer minor version, as 1.24 + is no longer supported. + +### Important Changes + +- **CloudNativePG is now officially a CNCF project**: CloudNativePG has been + accepted into the Cloud Native Computing Foundation (CNCF), marking a + significant milestone in its evolution. 
As part of this transition, the project + is now governed under **CloudNativePG, a Series of LF Projects, LLC**, ensuring + long-term sustainability and community-driven innovation. (#7203) + +### Enhancements + +- Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator, + allowing users to specify the domain suffix for fully qualified domain names + (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to + `cluster.local`. (#6989) + +- Implemented the `cnpg.io/validation` annotation, enabling users to disable + the validation webhook on CloudNativePG-managed resources. Use with caution, + as this allows unrestricted changes. (#7196) + +- Added support for collecting `pg_stat_wal` metrics in PostgreSQL 18. (#7005) + +- Added support for LZ4, XZ, and Zstandard compression methods when archiving + WAL files via Barman Cloud (*deprecated*). (#7151) + +### Security + +- Set `imagePullPolicy` to `Always` for the operator deployment to ensure that + images are always pulled from the registry, reducing the risk of using + outdated or potentially unsafe local images. (#7250) + +### Fixes + +- Fixed native replication slot synchronization and logical replication + failover for PostgreSQL 17 by appending the `dbname` parameter to + `primary_conninfo` in replica configurations (#7298). + +- Improved backup efficiency by introducing a fail-fast mechanism in WAL + archiving, allowing quicker detection of unexpected primary demotion and + avoiding unnecessary retries (#7483). + +- Fixed an off-by-one error in parallel WAL archiving that could cause one + extra worker process to be spawned beyond the requested number (#7389). + +- Resolved a race condition that caused the operator to perform two switchovers + when updating the PostgreSQL configuration. (#6991) + +- Corrected the `PodMonitor` configuration by adjusting the `matchLabels` scope + for the targeted pooler and cluster pods. Previously, the `matchLabels` were + too broad, inadvertently inheriting labels from the cluster and leading to data + collection from unintended targets. (#7063) + +- Added a webhook warning for clusters with a missing unit (e.g., MB, GB) in + the `shared_buffers` configuration. This will become an error in future + releases. Users should update their configurations to include explicit units + (e.g., `512MB` instead of `512`). (#7160) + +- CloudNativePG Interface (CNPG-I): + + - Implemented automatic reloading of TLS certificates for plugins when they + change. (#7029) + + - Ensured the operator properly closes the plugin connection when + performing a backup using the plugin. (#7095, #7096) + + - Improved performance and resilience of CNPG-I by removing timeouts for local + plugin operations, avoiding failures during longer backup or WAL archiving + executions (#7496). + +- `cnpg` plugin: + + - Ensured that the primary Pod is recreated during an imperative restart when + `primaryUpdateMethod` is set to `restart`, aligning its definition with the + replicas. (#7122) + +### Changes + +- Updated the default PostgreSQL version to 17.5 for new cluster + definitions. (#7556) + +- Updated the default PgBouncer version to **1.24.1** for new `Pooler` + deployments (#7399). + +## Version 1.24.3 + +**Release Date:** February 28, 2025 + +### Enhancements + +- Introduced a startup probe for the operator to enhance reliability and + prevent premature liveness probe failures during initialization. (#7008) +- Added support for using the `-r` service with the Pooler. 
(#6868) +- Introduced an optional `--ttl` flag for the `pgbench` plugin, enabling + automatic deletion of completed jobs after a user-defined duration. (#6701) +- Marked known error messages from the Azure CSI Driver for volume snapshots as + retryable, improving resilience. (#6906) +- Updated the default PostgreSQL version to 17.4 for new cluster + definitions. (#6960) + +### Security + +- The operator image build process has been enhanced to strengthen + security and transparency. Images are now signed with `cosign`, and + OCI attestations are generated, incorporating the Software Bill of + Materials (SBOM) and provenance data. Additionally, OCI annotations + have been added to improve traceability and ensure the integrity of + the images. + +### Bug Fixes + +- Fixed inconsistent behavior in default probe knob values when `.spec.probes` + is defined, ensuring users can override all settings, including + `failureThreshold`. If unspecified in the startup probe, `failureThreshold` is + now correctly derived from `.spec.startupDelay / periodSeconds` (default: `10`, + now overridable). The same logic applies to liveness probes via + `.spec.livenessProbeTimeout`. (#6656) +- Managed service ports now take precedence over default operator-defined + ports. (#6474) +- Fixed an issue where WAL metrics were unavailable after an instance restart + until a configuration change was applied. (#6816) +- Fixed an issue in monolithic database import where role import was skipped if + no roles were specified. (#6646) +- Added support for new metrics introduced in PgBouncer 1.24. (#6630) +- Improved handling of replication-sensitive parameter reductions by ensuring + timely reconciliation after primary server restarts. (#6440) +- Introduced a new `isWALArchiver` flag in the CNPG-I plugin configuration, + allowing users to designate a plugin as a WAL archiver. This enables seamless + migration from in-tree Barman Cloud support to the plugin while maintaining WAL + archive consistency. (#6593) +- Ensured `override.conf` is consistently included in `postgresql.conf` during + replica cluster bootstrapping, preventing replication failures due to missing + configuration settings. (#6808) +- Ensured `override.conf` is correctly initialized before invoking `pg_rewind` + to prevent failures during primary role changes. (#6670) +- Enhanced webhook responses to return both warnings and errors when + applicable, improving diagnostic accuracy. (#6579) +- Ensured the operator version is correctly reconciled. (#6496) +- Improved PostgreSQL version detection by using a more precise check of the + data directory. (#6659) +- Volume Snapshot Backups: + - Fixed an issue where unused backup connections were not properly cleaned + up. (#6882) + - Ensured the instance manager closes stale PostgreSQL connections left by + failed volume snapshot backups. (#6879) + - Prevented the operator from starting a new volume snapshot backup while + another is already in progress. (#6890) +- `cnpg` plugin: + - Restored functionality of the `promote` plugin command. (#6476) + - Enhanced `kubectl cnpg report --logs ` to collect logs from all + containers, including sidecars. (#6636) + - Ensured `pgbench` jobs can run when a `Cluster` uses an `ImageCatalog`. + (#6868) + +### Technical Enhancements + +- Added support for Kubernetes `client-gen`, enabling automated generation of + Go clients for all CloudNativePG CRDs. 
(#6695) + +## Version 1.24.2 + +**Release Date:** December 23, 2024 + +### Enhancements + +- Enable customization of startup, liveness, and readiness probes through the + `.spec.probes` stanza. (#6266) +- Add the `cnpg.io/userType` label to secrets generated for predefined users, + specifically `superuser` and `app`. (#4392) +- Improved validation for the `spec.schedule` field in ScheduledBackups, + raising warnings for potential misconfigurations. (#5396) +- `cnpg` plugin: + - Honor the `User-Agent` header in HTTP requests with the API server. (#6153) + +### Bug Fixes + +- Ensure the former primary flushes its WAL file queue to the archive before + re-synchronizing as a replica, reducing recovery times and enhancing data + consistency during failovers. (#6141) +- Clean the WAL volume along with the `PGDATA` volume during bootstrap. (#6265) +- Update the operator to set the cluster phase to `Unrecoverable` when + all previously generated `PersistentVolumeClaims` are missing. (#6170) +- Fix the parsing of the `synchronous_standby_names` GUC when + `.spec.postgresql.synchronous.method` is set to `first`. (#5955) +- Resolved a potential race condition when patching certain conditions + in CRD statuses, improving reliability in concurrent updates. (#6328) +- Correct role changes to apply at the transaction level instead of the + database context. (#6064) +- Remove the `primary_slot_name` definition from the `override.conf` file on + the primary to ensure it is always empty. (#6219) +- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods + to enable seamless access to the `pgbouncer` virtual database using `psql` + from within the container. (#6247) +- Remove unnecessary updates to the Cluster status when verifying changes in + the image catalog. (#6277) +- Prevent panic during recovery from an external server without proper backup + configuration. (#6300) +- Resolved a key collision issue in structured logs, where the name field was + inconsistently used to log two distinct values. (#6324) +- Ensure proper quoting of the inRoles field in SQL statements to prevent + syntax errors in generated SQL during role management. (#6346) +- `cnpg` plugin: + - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257) + - Avoid displaying physical backups block when empty with `status` command. (#5998) + +## Version 1.24.1 + +**Release date:** Oct 16, 2024 + +### Enhancements: + +- Remove the use of `pg_database_size` from the status probe, as it caused + high resource utilization by scanning the entire `PGDATA` directory to + compute database sizes. The `kubectl status` plugin will now rely on `du` + to provide detailed size information retrieval (#5689). +- Add the ability to configure the `full_page_writes` parameter in + PostgreSQL. This setting defaults to `on`, in line with PostgreSQL's + recommendations (#5516). +- Plugin: + - Add the `logs pretty` command in the `cnpg` plugin to read a log stream + from standard input and output a human-readable format, with options to + filter log entries (#5770) + - Enhance the `status` command by allowing multiple `-v` options to + increase verbosity for more detailed output (#5765). + - Add support for specifying a custom Docker image using the `--image` + flag in the `pgadmin4` plugin command, giving users control over the + Docker image used for pgAdmin4 deployments (#5515). 
+ +### Fixes: + +- Resolve an issue with concurrent status updates when demoting a primary to a + designated primary, ensuring smoother transitions during cluster role changes + (#5755). +- Ensure that replica PodDisruptionBudgets (PDB) are removed when scaling down + to two instances, enabling easier maintenance on the node hosting the replica + (#5487). +- Prioritize full rollout over inplace restarts (#5407). +- When using `.spec.postgresql.synchronous`, ensure that the + `synchronous_standby_names` parameter is correctly set, even when no replicas + are reachable (#5831). +- Fix an issue that could lead to double failover in cases of lost + connectivity (#5788). +- Correctly set the `TMPDIR` and `PSQL_HISTORY` environment variables for pods + and jobs, improving temporary file and history management (#5503). +- Plugin: + - Resolve a race condition in the `logs cluster` command (#5775). + - Display the `potential` sync status in the `status` plugin (#5533). + - Fix the issue where pods deployed by the `pgadmin4` command didn’t have + a writable home directory (#5800). + +### Supported versions + +- PostgreSQL 17 (PostgreSQL 17.0 is the default image) + +## Version 1.24.0 + +**Release date:** Aug 22, 2024 + +### Important changes: + +- Deprecate the `role` label in the selectors of `Service` and + `PodDisruptionBudget` resources in favor of `cnpg.io/instanceRole` (#4897). +- Fix the default PodAntiAffinity configuration for PostgreSQL Pods, + allowing a PostgreSQL and a Pooler Instance to coexist on the same node when + the anti-affinity configuration is set to `required` (#5156). + +!!! Warning + The PodAntiAffinity change will trigger a rollout of all the instances when + the operator is upgraded, even when online upgrades are enabled. + +### Features: + +- **Distributed PostgreSQL Topologies**: Enhance the replica cluster feature to + create distributed database topologies for PostgreSQL that span multiple + Kubernetes clusters, enabling hybrid and multi-cloud deployments. This feature + supports: + - **Declarative Primary Control**: Easily specify which PostgreSQL cluster + acts as the primary in a distributed setup (#4388). + - **Seamless Switchover**: Effortlessly demote the current primary and + promote a selected replica cluster, typically in a different region, + without needing to rebuild the former primary. This ensures high availability + and resilience in diverse environments (#4411). +- **Managed Services**: Introduce managed services via the `managed.services` + stanza (#4769 and #4952), allowing you to: + - Disable the read-only and read services via configuration. + - Leverage the service template capability to create custom service + resources, including load balancers, to access PostgreSQL outside + Kubernetes (particularly useful for DBaaS purposes). +- **Enhanced API for Synchronous Replication**: Introducing an improved API for + explicit configuration of synchronous replication, supporting both + quorum-based and priority list strategies. This update allows full + customization of the `synchronous_standby_names` option, providing greater + control and flexibility (#5148). +- **WAL Disk Space Exhaustion**: Safely stop the cluster when PostgreSQL runs + out of disk space to store WAL files, making recovery easier by increasing + the size of the related volume (#4404). + +### Enhancements: + +- Add support for delayed replicas by introducing the + `.spec.replica.minApplyDelay` option, leveraging PostgreSQL's + `recovery_min_apply_delay` capability (#5181). 
+- Introduce `postInitSQLRefs` and `postInitTemplateSQLRefs` to allow users to + define `postInit` and `postInitTemplate` instructions as one or more config + maps or secrets (#5074). +- Add transparent support for PostgreSQL 17's `allow_alter_system` parameter, + enabling or disabling the `ALTER SYSTEM` command through the +`.spec.postgresql.enableAlterSystem` option (#4921). +- Allow overriding the query metric name and the names of the columns using a + `name` key/value pair, which can replace the name automatically inherited + from the parent key (#4779). +- Enhanced control over exported metrics by making them subject to the value + returned by a custom query, which is run within the same transaction and + defined in the `predicate_query` field (#4503). +- Allow additional arguments to be passed to `barman-cloud-wal-archive` and + `barman-cloud-wal-restore` (#5099). +- Introduce the `reconcilePodSpec` annotation on the `Cluster` and `Pooler` + resources to control the restart of pods following a change in the Pod + specification (#5069). +- The readiness probe now fails for streaming replicas that were + never connected to the primary instance, allowing incoherent replicas + to be discovered promptly (#5206). +- Support the new metrics introduced in PgBouncer 1.23 in the `Pooler` metrics + collector (#5044). +- `cnpg` plugin updates: + - Enhance the `install generate` command by adding a `--control-plane` option, + allowing deployment of the operator on control-plane nodes by setting + node affinity and tolerations (#5271). + - Enhance the `destroy` command to delete also any job related to the target + instance (#5298). + - Enhanced the `status` command to display `demotionToken` and + `promotionToken` when available, providing more detailed operational + insights with distributed topologies (#5149). + - Added support for customizing the remote database name in the `publication` + and `subscription` subcommands. This enhancement offers greater flexibility + for synchronizing data from an external cluster with multiple databases (#5113). + +### Security: + +- Add TLS communication between the operator and instance manager (#4442). +- Add optional TLS communication for the instance metrics exporter (#4927). + +### Fixes: + +- Enhance the mechanism for detecting Pods that have been terminated but not + deleted during an eviction process, and extend the cleanup process during + maintenance windows to include unschedulable Pods when the `reusePVC` flag is + set to false (#2056). +- Disable `pg_rewind` execution for newly created replicas that employ + VolumeSnapshot during bootstrapping to avoid introducing a new shutdown + checkpoint entry in the WAL files. This ensures that replicas can reconnect to + the primary without issues, which would otherwise be hindered by the additional + checkpoint entry (#5081). +- Gracefully handle failures during the initialization of a new instance. + Any remaining data from the failed initialization is now either removed or, + if it's a valid PostgreSQL data directory, moved to a backup location to avoid + possible data loss (#5112). +- Enhance the robustness of the immediate backups reconciler by implementing + retry logic upon initial backup failure (#4982). +- Wait for the `postmaster` to shut down before starting it again (#4938). +- Ensure that the `Pooler` service template can override the default service + (#4846). +- Exclude immutable databases from `pg_database` metric monitoring and alerting + processes (#4980). 
+- Removed unnecessary permissions from the operator service account (#4911). +- Fix cluster role permissions for `ClusterImageCatalogs` (#5034). +- Ensure the operator initiates a rollout of the `Pooler` instance when + the operator image is upgraded (#5006) +- Address race condition causing the readiness probe to incorrectly + show "not ready" after a PostgreSQL restart, even when the + `postmaster` was accessible (#4920). +- Prevent reconciliation of resources that aren't owned by a `Pooler` (#4967). +- Renew the certificates managed by the operator when the DNS Subject + Alternative Names (SANs) are updated (#3269, #3319). +- Set PVC default `AccessModes` in the template only when unspecified (#4845). +- Gracefully handle unsatisfiable backup schedule (#5109). +- Synchronous replication self-healing checks now exclude terminated pods, + focusing only on active and functional pods (#5210). +- The instance manager will now terminate all existing operator-related replication + connections following a role change in a replica cluster (#5209). +- Allow setting `smartShutdownTimeout` to zero, enabling immediate fast + shutdown and bypassing the smart shutdown process when required (#5347). +- `cnpg` plugin: + - Properly handle errors during the `status` command execution. + - Support TLS in the `status` command (#4915). + +### Supported versions + +- Kubernetes 1.31, 1.30, 1.29, and 1.28 +- PostgreSQL 16, 15, 14, 13, and 12 + - PostgreSQL 16.4 is the default image + - PostgreSQL 12 support ends on November 12, 2024 + diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md deleted file mode 100644 index 4e5c2ae8e0..0000000000 --- a/docs/src/release_notes/v1.24.md +++ /dev/null @@ -1,147 +0,0 @@ -# Release notes for CloudNativePG 1.24 - -History of user-visible changes in the 1.24 minor release of CloudNativePG. - -For a complete list of changes, please refer to the -[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.24) -on the release branch in GitHub. - -## Version 1.24.0 - -**Release date:** Aug 22, 2024 - -### Important changes: - -- Deprecate the `role` label in the selectors of `Service` and - `PodDisruptionBudget` resources in favor of `cnpg.io/instanceRole` (#4897). -- Fix the default PodAntiAffinity configuration for PostgreSQL Pods, - allowing a PostgreSQL and a Pooler Instance to coexist on the same node when - the anti-affinity configuration is set to `required` (#5156). - -!!! Warning - The PodAntiAffinity change will trigger a rollout of all the instances when - the operator is upgraded, even when online upgrades are enabled. - -### Features: - -- **Distributed PostgreSQL Topologies**: Enhance the replica cluster feature to - create distributed database topologies for PostgreSQL that span multiple - Kubernetes clusters, enabling hybrid and multi-cloud deployments. This feature - supports: - - **Declarative Primary Control**: Easily specify which PostgreSQL cluster - acts as the primary in a distributed setup (#4388). - - **Seamless Switchover**: Effortlessly demote the current primary and - promote a selected replica cluster, typically in a different region, - without needing to rebuild the former primary. This ensures high availability - and resilience in diverse environments (#4411). -- **Managed Services**: Introduce managed services via the `managed.services` - stanza (#4769 and #4952), allowing you to: - - Disable the read-only and read services via configuration. 
- - Leverage the service template capability to create custom service - resources, including load balancers, to access PostgreSQL outside - Kubernetes (particularly useful for DBaaS purposes). -- **Enhanced API for Synchronous Replication**: Introducing an improved API for - explicit configuration of synchronous replication, supporting both - quorum-based and priority list strategies. This update allows full - customization of the `synchronous_standby_names` option, providing greater - control and flexibility (#5148). -- **WAL Disk Space Exhaustion**: Safely stop the cluster when PostgreSQL runs - out of disk space to store WAL files, making recovery easier by increasing - the size of the related volume (#4404). - -### Enhancements: - -- Add support for delayed replicas by introducing the - `.spec.replica.minApplyDelay` option, leveraging PostgreSQL's - `recovery_min_apply_delay` capability (#5181). -- Introduce `postInitSQLRefs` and `postInitTemplateSQLRefs` to allow users to - define `postInit` and `postInitTemplate` instructions as one or more config - maps or secrets (#5074). -- Add transparent support for PostgreSQL 17's `allow_alter_system` parameter, - enabling or disabling the `ALTER SYSTEM` command through the -`.spec.postgresql.enableAlterSystem` option (#4921). -- Allow overriding the query metric name and the names of the columns using a - `name` key/value pair, which can replace the name automatically inherited - from the parent key (#4779). -- Enhanced control over exported metrics by making them subject to the value - returned by a custom query, which is run within the same transaction and - defined in the `predicate_query` field (#4503). -- Allow additional arguments to be passed to `barman-cloud-wal-archive` and - `barman-cloud-wal-restore` (#5099). -- Introduce the `reconcilePodSpec` annotation on the `Cluster` and `Pooler` - resources to control the restart of pods following a change in the Pod - specification (#5069). -- The readiness probe now fails for streaming replicas that were - never connected to the primary instance, allowing incoherent replicas - to be discovered promptly (#5206). -- Support the new metrics introduced in PgBouncer 1.23 in the `Pooler` metrics - collector (#5044). -- `cnpg` plugin updates: - - Enhance the `install generate` command by adding a `--control-plane` option, - allowing deployment of the operator on control-plane nodes by setting - node affinity and tolerations (#5271). - - Enhance the `destroy` command to delete also any job related to the target - instance (#5298). - - Enhanced the `status` command to display `demotionToken` and - `promotionToken` when available, providing more detailed operational - insights with distributed topologies (#5149). - - Added support for customizing the remote database name in the `publication` - and `subscription` subcommands. This enhancement offers greater flexibility - for synchronizing data from an external cluster with multiple databases (#5113). - -### Security: - -- Add TLS communication between the operator and instance manager (#4442). -- Add optional TLS communication for the instance metrics exporter (#4927). - -### Fixes: - -- Enhance the mechanism for detecting Pods that have been terminated but not - deleted during an eviction process, and extend the cleanup process during - maintenance windows to include unschedulable Pods when the `reusePVC` flag is - set to false (#2056). 
-- Disable `pg_rewind` execution for newly created replicas that employ - VolumeSnapshot during bootstrapping to avoid introducing a new shutdown - checkpoint entry in the WAL files. This ensures that replicas can reconnect to - the primary without issues, which would otherwise be hindered by the additional - checkpoint entry (#5081). -- Gracefully handle failures during the initialization of a new instance. - Any remaining data from the failed initialization is now either removed or, - if it's a valid PostgreSQL data directory, moved to a backup location to avoid - possible data loss (#5112). -- Enhance the robustness of the immediate backups reconciler by implementing - retry logic upon initial backup failure (#4982). -- Wait for the `postmaster` to shut down before starting it again (#4938). -- Ensure that the `Pooler` service template can override the default service - (#4846). -- Exclude immutable databases from `pg_database` metric monitoring and alerting - processes (#4980). -- Removed unnecessary permissions from the operator service account (#4911). -- Fix cluster role permissions for `ClusterImageCatalogs` (#5034). -- Ensure the operator initiates a rollout of the `Pooler` instance when - the operator image is upgraded (#5006) -- Address race condition causing the readiness probe to incorrectly - show "not ready" after a PostgreSQL restart, even when the - `postmaster` was accessible (#4920). -- Prevent reconciliation of resources that aren't owned by a `Pooler` (#4967). -- Renew the certificates managed by the operator when the DNS Subject - Alternative Names (SANs) are updated (#3269, #3319). -- Set PVC default `AccessModes` in the template only when unspecified (#4845). -- Gracefully handle unsatisfiable backup schedule (#5109). -- Synchronous replication self-healing checks now exclude terminated pods, - focusing only on active and functional pods (#5210). -- The instance manager will now terminate all existing operator-related replication - connections following a role change in a replica cluster (#5209). -- Allow setting `smartShutdownTimeout` to zero, enabling immediate fast - shutdown and bypassing the smart shutdown process when required (#5347). -- `cnpg` plugin: - - Properly handle errors during the `status` command execution. - - Support TLS in the `status` command (#4915). - -### Supported versions - -- Kubernetes 1.31, 1.30, 1.29, and 1.28 -- PostgreSQL 16, 15, 14, 13, and 12 - - PostgreSQL 16.4 is the default image - - PostgreSQL 12 support ends on November 12, 2024 - diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md new file mode 100644 index 0000000000..e52b749c84 --- /dev/null +++ b/docs/src/release_notes/v1.25.md @@ -0,0 +1,349 @@ +# Release notes for CloudNativePG 1.25 + + +History of user-visible changes in the 1.25 minor release of CloudNativePG. + +For a complete list of changes, please refer to the +[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.25) +on the release branch in GitHub. + +## Version 1.25.3 + +**Release date:** Jul 25, 2025 + +*In memory of [DJ Walker-Morgan](https://www.linkedin.com/in/codepope/).* + +### Changes + +- Removed `386` and ARM (v5/v6/v7) architectures from the `cnpg` plugin build + matrix, reducing the number of published binaries + ([#7648](https://github.com/cloudnative-pg/cloudnative-pg/pull/7648)). 
+
+### Enhancements
+
+- Improved validation of `shared_buffers` by correctly considering `HugePages`
+  settings, ensuring accurate memory configuration checks
+  ([#7864](https://github.com/cloudnative-pg/cloudnative-pg/pull/7864)).
+
+- Set `oom_score_adj` for PostgreSQL worker processes to improve prioritization
+  during out-of-memory situations
+  ([#7891](https://github.com/cloudnative-pg/cloudnative-pg/pull/7891)).
+
+- Added `fqdn-uri` and `fqdn-jdbc-uri` fields in user secrets to simplify
+  application connection string management and align with DNS-based connection
+  best practices ([#7852](https://github.com/cloudnative-pg/cloudnative-pg/pull/7852)).
+
+- Added the `systemID` field and related condition in the `Cluster` status to track
+  the PostgreSQL system identifier.
+  ([#7717](https://github.com/cloudnative-pg/cloudnative-pg/pull/7717)).
+
+### Fixes
+
+- Added a mutex in the connection pooler to protect concurrent access to the
+  connections map, improving stability in high-concurrency environments
+  ([#7804](https://github.com/cloudnative-pg/cloudnative-pg/pull/7804)).
+
+- Fixed replica cluster instance ordering by correctly detecting the designated
+  primary, improving replica cluster stability and switchover operations
+  ([#8108](https://github.com/cloudnative-pg/cloudnative-pg/pull/8108)).
+
+- Added support for reconciling `VolumeAttributesClass` for PVCs, enhancing
+  storage compatibility and lifecycle management
+  ([#7885](https://github.com/cloudnative-pg/cloudnative-pg/pull/7885)).
+
+- Made the internal webserver routines non-blocking to improve responsiveness
+  under load ([#8071](https://github.com/cloudnative-pg/cloudnative-pg/pull/8071)).
+
+- Fixed an issue where the `ensureReplicationClientLeafCertificate` error did
+  not display the correct `secretName` in the not found message
+  ([#8086](https://github.com/cloudnative-pg/cloudnative-pg/pull/8086)).
+
+- Prevented invalid `ALTER SUBSCRIPTION` statements by updating only
+  PostgreSQL-supported parameters; unsupported options like `copy_data` are
+  ignored to avoid reconciliation failures
+  ([#7844](https://github.com/cloudnative-pg/cloudnative-pg/pull/7844)).
+
+- Fixed an issue where the `bootstrap-controller` in the connection pooler did
+  not apply `resources` settings correctly
+  ([#7922](https://github.com/cloudnative-pg/cloudnative-pg/pull/7922)).
+
+- Ensured online backups fail cleanly if the `targetPod` becomes unhealthy
+  during backup, preventing partial or misleading backups
+  ([#7944](https://github.com/cloudnative-pg/cloudnative-pg/pull/7944)).
+
+- Ensured the Backup resource status is set properly after a failure, improving
+  observability and scripting reliability
+  ([#7898](https://github.com/cloudnative-pg/cloudnative-pg/pull/7898)).
+
+## Version 1.25.2
+
+**Release date:** May 23, 2025
+
+### Important Changes
+
+- **CloudNativePG is now officially a CNCF project**: CloudNativePG has been
+  accepted into the Cloud Native Computing Foundation (CNCF), marking a
+  significant milestone in its evolution. As part of this transition, the project
+  is now governed under **CloudNativePG, a Series of LF Projects, LLC**, ensuring
+  long-term sustainability and community-driven innovation. (#7203)
+
+### Enhancements
+
+- Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator,
+  allowing users to specify the domain suffix for fully qualified domain names
+  (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to
+  `cluster.local`. 
(#6989) + +- Implemented the `cnpg.io/validation` annotation, enabling users to disable + the validation webhook on CloudNativePG-managed resources. Use with caution, + as this allows unrestricted changes. (#7196) + +- Added support for collecting `pg_stat_wal` metrics in PostgreSQL 18. (#7005) + +- Added support for LZ4, XZ, and Zstandard compression methods when archiving + WAL files via Barman Cloud (*deprecated*). (#7151) + +- CloudNativePG Interface (CNPG-I): + + - A plugin can now trigger instance rollouts by implementing the `EVALUATE` + verb, ensuring that plugin-induced changes are properly reconciled. (#7126) + + - Introduced support for WAL recovery via CNPG-I plugins during snapshot + restore. (#7284) + +### Security + +- Set `imagePullPolicy` to `Always` for the operator deployment to ensure that + images are always pulled from the registry, reducing the risk of using + outdated or potentially unsafe local images. (#7250) + +### Fixes + +- Fixed native replication slot synchronization and logical replication + failover for PostgreSQL 17 by appending the `dbname` parameter to + `primary_conninfo` in replica configurations (#7298). + +- Fixed a regression in WAL restore operations that prevented fallback to the + in-tree `barmanObjectStore` configuration defined in the `externalCluster` + source when a plugin failed to locate a WAL file (#7507). + +- Improved backup efficiency by introducing a fail-fast mechanism in WAL + archiving, allowing quicker detection of unexpected primary demotion and + avoiding unnecessary retries (#7483). + +- Fixed an off-by-one error in parallel WAL archiving that could cause one + extra worker process to be spawned beyond the requested number (#7389). + +- Resolved a race condition that caused the operator to perform two switchovers + when updating the PostgreSQL configuration. (#6991) + +- Corrected the `PodMonitor` configuration by adjusting the `matchLabels` scope + for the targeted pooler and cluster pods. Previously, the `matchLabels` were + too broad, inadvertently inheriting labels from the cluster and leading to data + collection from unintended targets. (#7063) + +- Added a webhook warning for clusters with a missing unit (e.g., MB, GB) in + the `shared_buffers` configuration. This will become an error in future + releases. Users should update their configurations to include explicit units + (e.g., `512MB` instead of `512`). (#7160) + +- Treated timeout errors during volume snapshot creation as retryable to + prevent unnecessary backup failures. (#7010) + +- Moved the defaulting logic for `.spec.postgresql.synchronous.dataDurability` + from the CRD to the webhook to avoid UI issues with OLM. (#7600) + +- CloudNativePG Interface (CNPG-I): + + - Implemented automatic reloading of TLS certificates for plugins when they + change. (#7029) + + - Ensured the operator properly closes the plugin connection when + performing a backup using the plugin. (#7095, #7096) + + - Improved performance and resilience of CNPG-I by removing timeouts for local + plugin operations, avoiding failures during longer backup or WAL archiving + executions (#7496). + +- `cnpg` plugin: + + - Increased the buffer size in the `logs pretty` command to better handle + larger log output (#7281). + + - Ensured the `plugin-name` parameter is required for plugin-based backups + and disallowed for non-plugin backup methods (#7506). 
+ + - Ensured that the primary Pod is recreated during an imperative restart when + `primaryUpdateMethod` is set to `restart`, aligning its definition with the + replicas. (#7122) + +### Changes + +- Updated the default PostgreSQL version to 17.5 for new cluster + definitions. (#7556) + +- Updated the default PgBouncer version to **1.24.1** for new `Pooler` + deployments (#7399). + +## Version 1.25.1 + +**Release Date:** February 28, 2025 + +### Enhancements + +- Introduced a startup probe for the operator to enhance reliability and + prevent premature liveness probe failures during initialization. (#7008) +- Added support for using the `-r` service with the Pooler. (#6868) +- Introduced an optional `--ttl` flag for the `pgbench` plugin, enabling + automatic deletion of completed jobs after a user-defined duration. (#6701) +- Marked known error messages from the Azure CSI Driver for volume snapshots as + retryable, improving resilience. (#6906) +- Updated the default PostgreSQL version to 17.4 for new cluster + definitions. (#6960) + +### Security + +- The operator image build process has been enhanced to strengthen + security and transparency. Images are now signed with `cosign`, and + OCI attestations are generated, incorporating the Software Bill of + Materials (SBOM) and provenance data. Additionally, OCI annotations + have been added to improve traceability and ensure the integrity of + the images. + +### Bug Fixes + +- Fixed inconsistent behavior in default probe knob values when `.spec.probes` + is defined, ensuring users can override all settings, including + `failureThreshold`. If unspecified in the startup probe, `failureThreshold` is + now correctly derived from `.spec.startupDelay / periodSeconds` (default: `10`, + now overridable). The same logic applies to liveness probes via + `.spec.livenessProbeTimeout`. (#6656) +- Managed service ports now take precedence over default operator-defined + ports. (#6474) +- Fixed an issue where WAL metrics were unavailable after an instance restart + until a configuration change was applied. (#6816) +- Fixed an issue in monolithic database import where role import was skipped if + no roles were specified. (#6646) +- Added support for new metrics introduced in PgBouncer 1.24. (#6630) +- Resolved an issue where `Database`, `Publication`, and `Subscription` CRDs + became stuck in `cluster resource has been deleted, skipping reconciliation` + after cluster rehydration. This patch forces `status.observedGeneration` to + zero, ensuring proper reconciliation. (#6607) +- Improved handling of replication-sensitive parameter reductions by ensuring + timely reconciliation after primary server restarts. (#6440) +- Introduced a new `isWALArchiver` flag in the CNPG-I plugin configuration, + allowing users to designate a plugin as a WAL archiver. This enables seamless + migration from in-tree Barman Cloud support to the plugin while maintaining WAL + archive consistency. (#6593) +- Ensured `override.conf` is consistently included in `postgresql.conf` during + replica cluster bootstrapping, preventing replication failures due to missing + configuration settings. (#6808) +- Ensured `override.conf` is correctly initialized before invoking `pg_rewind` + to prevent failures during primary role changes. (#6670) +- Enhanced webhook responses to return both warnings and errors when + applicable, improving diagnostic accuracy. (#6579) +- Ensured the operator version is correctly reconciled. 
(#6496) +- Improved PostgreSQL version detection by using a more precise check of the + data directory. (#6659) +- Volume Snapshot Backups: + - Fixed an issue where unused backup connections were not properly cleaned + up. (#6882) + - Ensured the instance manager closes stale PostgreSQL connections left by + failed volume snapshot backups. (#6879) + - Prevented the operator from starting a new volume snapshot backup while + another is already in progress. (#6890) +- `cnpg` plugin: + - Restored functionality of the `promote` plugin command. (#6476) + - Enhanced `kubectl cnpg report --logs ` to collect logs from all + containers, including sidecars. (#6636) + - Ensured `pgbench` jobs can run when a `Cluster` uses an `ImageCatalog`. + (#6868) + +### Technical Enhancements + +- Added support for Kubernetes `client-gen`, enabling automated generation of + Go clients for all CloudNativePG CRDs. (#6695) + +## Version 1.25.0 + +**Release Date:** December 23, 2024 + +### Features + +- **Declarative Database Management**: Introduce the `Database` Custom Resource + Definition (CRD), enabling users to create and manage PostgreSQL databases + declaratively within a cluster. (#5325) + +- **Logical Replication Management**: Add `Publication` and `Subscription` CRDs + for declarative management of PostgreSQL logical replication. These simplify + replication setup and facilitate online migrations to CloudNativePG. (#5329) + +- **Experimental Support for CNPG-I**: Introducing CNPG-I (CloudNativePG + Interface), a standardized framework designed to extend CloudNativePG + functionality through third-party plugins and foster the growth of the CNPG + ecosystem. + The [Barman Cloud Plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) serves as a live + example, illustrating how plugins can be developed to enhance backup and + recovery workflows. Although CNPG-I support is currently experimental, it + offers a powerful approach to extending CloudNativePG without modifying the + operator’s core code—akin to PostgreSQL extensions. We welcome community + feedback and contributions to shape this exciting new capability. + +### Enhancements + +- Add the `dataDurability` option to the `.spec.postgresql.synchronous` stanza, + allowing users to choose between `required` (default) or `preferred` + durability in synchronous replication. (#5878) +- Enable customization of startup, liveness, and readiness probes through the + `.spec.probes` stanza. (#6266) +- Support additional `pg_dump` and `pg_restore` options to enhance database + import flexibility. (#6214) +- Add support for `maxConcurrentReconciles` in the CloudNativePG controller and + set the default to 10, improving the operator's ability to efficiently manage + larger deployments out of the box. (#5678) +- Add the `cnpg.io/userType` label to secrets generated for predefined users, + specifically `superuser` and `app`. (#4392) +- Improved validation for the `spec.schedule` field in ScheduledBackups, + raising warnings for potential misconfigurations. (#5396) +- `cnpg` plugin: + - Enhance the `backup` command to support plugins. (#6045) + - Honor the `User-Agent` header in HTTP requests with the API server. (#6153) + +### Bug Fixes + +- Ensure the former primary flushes its WAL file queue to the archive before + re-synchronizing as a replica, reducing recovery times and enhancing data + consistency during failovers. (#6141) +- Clean the WAL volume along with the `PGDATA` volume during bootstrap. 
(#6265)
+- Update the operator to set the cluster phase to `Unrecoverable` when
+  all previously generated `PersistentVolumeClaims` are missing. (#6170)
+- Fix the parsing of the `synchronous_standby_names` GUC when
+  `.spec.postgresql.synchronous.method` is set to `first`. (#5955)
+- Resolve a potential race condition when patching certain conditions
+  in CRD statuses, improving reliability in concurrent updates. (#6328)
+- Correct role changes to apply at the transaction level instead of the
+  database context. (#6064)
+- Remove the `primary_slot_name` definition from the `override.conf` file on
+  the primary to ensure it is always empty. (#6219)
+- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods
+  to enable seamless access to the `pgbouncer` virtual database using `psql`
+  from within the container. (#6247)
+- Remove unnecessary updates to the Cluster status when verifying changes in
+  the image catalog. (#6277)
+- Prevent panic during recovery from an external server without proper backup
+  configuration. (#6300)
+- Resolve a key collision issue in structured logs, where the name field was
+  inconsistently used to log two distinct values. (#6324)
+- Ensure proper quoting of the inRoles field in SQL statements to prevent
+  syntax errors in generated SQL during role management. (#6346)
+- `cnpg` plugin:
+  - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257)
+  - Avoid displaying physical backups block when empty with `status` command. (#5998)
+
+### Supported Versions
+
+- **Kubernetes**: 1.32, 1.31, 1.30, and 1.29
+- **PostgreSQL**: 17, 16, 15, 14, and 13
+  - Default image: PostgreSQL 17.2
+  - Officially dropped support for PostgreSQL 12
+  - PostgreSQL 13 support ends on November 12, 2025
diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md
new file mode 100644
index 0000000000..16067cf9a4
--- /dev/null
+++ b/docs/src/release_notes/v1.26.md
@@ -0,0 +1,307 @@
+# Release notes for CloudNativePG 1.26
+
+
+History of user-visible changes in the 1.26 minor release of CloudNativePG.
+
+For a complete list of changes, please refer to the
+[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.26)
+on the release branch in GitHub.
+
+## Version 1.26.1
+
+**Release date:** Jul 25, 2025
+
+*In memory of [DJ Walker-Morgan](https://www.linkedin.com/in/codepope/).*
+
+### Important Changes
+
+CloudNativePG is moving toward a plugin-based, backup- and recovery-agnostic
+architecture (initiated in 1.26.0 with Barman Cloud). As part of this
+transition, the following fields in the `.status` section of the `Cluster`
+resource are now deprecated:
+
+- `firstRecoverabilityPoint`
+- `firstRecoverabilityPointByMethod`
+- `lastSuccessfulBackup`
+- `lastSuccessfulBackupByMethod`
+- `lastFailedBackup`
+
+Additionally, the following Prometheus metrics are deprecated:
+
+- `cnpg_collector_first_recoverability_point`
+- `cnpg_collector_last_available_backup_timestamp`
+- `cnpg_collector_last_failed_backup_timestamp`
+
+These fields and metrics will no longer update when using plugin-based backups
+(e.g., Barman Cloud via CNPG-I). They remain functional for users still using
+in-core Barman Cloud and volume snapshot backups.
+
+> **Note:** We, as maintainers, are sorry for any inconvenience caused by not
+> highlighting this change during the 1.26.0 release. 
As we transition to a
> plugin-based backup and recovery architecture, we encourage you to **test
> your chosen plugin thoroughly in a staging environment before moving to
> production** to ensure your workflows and observability integration continue
> to meet your needs. Thank you for your understanding and for working with us
> as CloudNativePG evolves to provide a more modular and robust experience.
+
+### Changes
+
+- Removed `386` and ARM (v5/v6/v7) architectures from the `cnpg` plugin build
+  matrix, reducing the number of published binaries
+  ([#7648](https://github.com/cloudnative-pg/cloudnative-pg/pull/7648)).
+
+### Enhancements
+
+- Improved validation of `shared_buffers` by correctly considering `HugePages`
+  settings, ensuring accurate memory configuration checks
+  ([#7864](https://github.com/cloudnative-pg/cloudnative-pg/pull/7864)).
+
+- Set `oom_score_adj` for PostgreSQL worker processes to improve prioritization
+  during out-of-memory situations
+  ([#7891](https://github.com/cloudnative-pg/cloudnative-pg/pull/7891)).
+
+- Added `fqdn-uri` and `fqdn-jdbc-uri` fields in user secrets to simplify
+  application connection string management and align with DNS-based connection
+  best practices ([#7852](https://github.com/cloudnative-pg/cloudnative-pg/pull/7852)).
+
+- Added the `systemID` field and related condition in the `Cluster` status to track
+  the PostgreSQL system identifier
+  ([#7717](https://github.com/cloudnative-pg/cloudnative-pg/pull/7717)).
+
+### Fixes
+
+- Added a mutex in the connection pooler to protect concurrent access to the
+  connections map, improving stability in high-concurrency environments
+  ([#7804](https://github.com/cloudnative-pg/cloudnative-pg/pull/7804)).
+
+- Fixed replica cluster instance ordering by correctly detecting the designated
+  primary, improving replica cluster stability and switchover operations
+  ([#8108](https://github.com/cloudnative-pg/cloudnative-pg/pull/8108)).
+
+- Added support for reconciling `VolumeAttributesClass` for PVCs, enhancing
+  storage compatibility and lifecycle management
+  ([#7885](https://github.com/cloudnative-pg/cloudnative-pg/pull/7885)).
+
+- Made the internal webserver routines non-blocking to improve responsiveness
+  under load ([#8071](https://github.com/cloudnative-pg/cloudnative-pg/pull/8071)).
+
+- Fixed an issue where the `ensureReplicationClientLeafCertificate` error did
+  not display the correct `secretName` in the not found message
+  ([#8086](https://github.com/cloudnative-pg/cloudnative-pg/pull/8086)).
+
+- Prevented invalid `ALTER SUBSCRIPTION` statements by updating only
+  PostgreSQL-supported parameters; unsupported options like `copy_data` are
+  ignored to avoid reconciliation failures
+  ([#7844](https://github.com/cloudnative-pg/cloudnative-pg/pull/7844)).
+
+- Fixed an issue where the `bootstrap-controller` in the connection pooler did
+  not apply `resources` settings correctly
+  ([#7922](https://github.com/cloudnative-pg/cloudnative-pg/pull/7922)).
+
+- Ensured online backups fail cleanly if the `targetPod` becomes unhealthy
+  during backup, preventing partial or misleading backups
+  ([#7944](https://github.com/cloudnative-pg/cloudnative-pg/pull/7944)).
+
+- Ensured the Backup resource status is set properly after a failure, improving
+  observability and scripting reliability
+  ([#7898](https://github.com/cloudnative-pg/cloudnative-pg/pull/7898)). 
+ +- Improved liveness probe handling to avoid unnecessary timeouts when they are + not required ([#7902](https://github.com/cloudnative-pg/cloudnative-pg/pull/7902)). + +## Version 1.26.0 + +**Release date:** May 23, 2025 + +### Important Changes + +- **CloudNativePG is now officially a CNCF project**: CloudNativePG has been + accepted into the Cloud Native Computing Foundation (CNCF), marking a + significant milestone in its evolution. As part of this transition, the project + is now governed under **CloudNativePG, a Series of LF Projects, LLC**, ensuring + long-term sustainability and community-driven innovation. (#7203) + +- **Deprecation of Native Barman Cloud Support**: Native support for Barman + Cloud backups and recovery is now deprecated and will be fully removed in + CloudNativePG version 1.28.0. Although still available in the current release, + users are strongly encouraged to begin migrating their existing clusters to the + new [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/) to + ensure a smooth and seamless transition. The plugin should also be used for all + new deployments. This change marks the first step toward making CloudNativePG a + backup-agnostic solution, a goal that will be fully realized when volume + snapshot support is also moved to a plugin-based architecture. (#6876) + +- **End of Support for Barman 3.4 and Earlier**: CloudNativePG no longer + supports Barman versions 3.4 and earlier, including the capability detection + framework. Users running older operand versions (from before April 2023) must + update their operand before upgrading the operator to avoid compatibility + issues. (#7220) + +- **Hibernation Command Changes**: The `hibernate on` and `hibernate off` + commands in the `cnpg` plugin for `kubectl` now serve as shortcuts for + declarative hibernation. The previous imperative approach has been removed in + favor of this method. Additionally, the `hibernate status` command has been + removed, as its functionality is now covered by the standard `status` + command. **Warning:** Do not upgrade to version 1.26 of both the plugin and + the operator unless you are prepared to migrate to the declarative + hibernation method. (#7155) + +### Features + +- **Declarative Offline In-Place Major Upgrades of PostgreSQL**: Introduced + support for offline in-place major upgrades when a new operand container + image with a higher PostgreSQL major version is applied to a cluster. During + the upgrade, all cluster pods are shut down to ensure data consistency. A new + job is created to validate upgrade conditions, run `pg_upgrade`, and set up new + directories for `PGDATA`, WAL files, and tablespaces as needed. Once the + upgrade is complete, replicas are re-created. Failed upgrades can be rolled + back declaratively. (#6664) + +- **Improved Startup and Readiness Probes for Replicas**: Enhanced support for + Kubernetes startup and readiness probes in PostgreSQL instances, providing + greater control over replicas based on the streaming lag. (#6623) + +- **Declarative management of extensions and schemas**: Introduced the + `extensions` and `schemas` stanzas in the Database resource to declaratively + create, modify, and drop PostgreSQL extensions and schemas within a database. (#7062) + +### Enhancements + +- Introduced an opt-in experimental feature to enhance the liveness probe with + network isolation detection for primary instances. This feature can be + activated via the `alpha.cnpg.io/livenessPinger` annotation (#7466). 
+ +- Introduced the `STANDBY_TCP_USER_TIMEOUT` operator configuration setting, + allowing users to specify the `tcp_user_timeout` parameter on all standby + instances managed by the operator. (#7036) + +- Introduced the `DRAIN_TAINTS` operator configuration option, enabling users + to customize which node taints indicate a node is being drained. This + replaces the previous fixed behavior of only recognizing + `node.kubernetes.io/unschedulable` as a drain signal. (#6928) + +- Added a new field in the `status` of the `Cluster` resource to track the + latest known Pod IP (#7546). + +- Added the `pg_extensions` metric, providing information about installed + PostgreSQL extensions and their latest available versions. (#7195) + +- Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator, + allowing users to specify the domain suffix for fully qualified domain names + (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to + `cluster.local`. (#6989) + +- Implemented the `cnpg.io/validation` annotation, enabling users to disable + the validation webhook on CloudNativePG-managed resources. Use with caution, + as this allows unrestricted changes. (#7196) + +- Added support for patching PostgreSQL instance pods using the + `cnpg.io/podPatch` annotation with a JSON Patch. This may introduce + discrepancies between the operator’s expectations and Kubernetes behavior, so + it should be used with caution. (#6323) + +- Added support for collecting `pg_stat_wal` metrics in PostgreSQL 18. (#7005) + +- Removed the `ENABLE_AZURE_PVC_UPDATES` configuration, as it is no longer + required to resize Azure volumes correctly. The Azure CSI driver includes the + necessary fix as of version [1.11.0](https://github.com/kubernetes-sigs/azuredisk-csi-driver/releases/tag/v1.11.0). (#7297) + +- The `.spec.backup.barmanObjectStore` and `.spec.backup.retentionPolicy` + fields are now deprecated in favor of the external Barman Cloud Plugin, and a + warning is now emitted by the admission webhook when these fields are used in + the `Cluster` specification (#7500). + +- Added support for LZ4, XZ, and Zstandard compression methods when archiving + WAL files via Barman Cloud (*deprecated*). (#7151) + +- CloudNativePG Interface (CNPG-I): + + - A plugin can now trigger instance rollouts by implementing the `EVALUATE` + verb, ensuring that plugin-induced changes are properly reconciled. (#7126) + + - Introduced support for WAL recovery via CNPG-I plugins during snapshot + restore. (#7284) + +### Security + +- Set `imagePullPolicy` to `Always` for the operator deployment to ensure that + images are always pulled from the registry, reducing the risk of using + outdated or potentially unsafe local images. (#7250) + +### Fixes + +- Fixed native replication slot synchronization and logical replication + failover for PostgreSQL 17 by appending the `dbname` parameter to + `primary_conninfo` in replica configurations (#7298). + +- Fixed a regression in WAL restore operations that prevented fallback to the + in-tree `barmanObjectStore` configuration defined in the `externalCluster` + source when a plugin failed to locate a WAL file (#7507). + +- Improved backup efficiency by introducing a fail-fast mechanism in WAL + archiving, allowing quicker detection of unexpected primary demotion and + avoiding unnecessary retries (#7483). + +- Fixed an off-by-one error in parallel WAL archiving that could cause one + extra worker process to be spawned beyond the requested number (#7389). 
+
+- Resolved a race condition that caused the operator to perform two switchovers
+  when updating the PostgreSQL configuration. (#6991)
+
+- Corrected the `PodMonitor` configuration by adjusting the `matchLabels` scope
+  for the targeted pooler and cluster pods. Previously, the `matchLabels` were
+  too broad, inadvertently inheriting labels from the cluster and leading to data
+  collection from unintended targets. (#7063)
+
+- Added a webhook warning for clusters with a missing unit (e.g., MB, GB) in
+  the `shared_buffers` configuration. This will become an error in future
+  releases. Users should update their configurations to include explicit units
+  (e.g., `512MB` instead of `512`). (#7160)
+
+- Treated timeout errors during volume snapshot creation as retryable to
+  prevent unnecessary backup failures. (#7010)
+
+- Moved the defaulting logic for `.spec.postgresql.synchronous.dataDurability`
+  from the CRD to the webhook to avoid UI issues with OLM. (#7600)
+
+- CloudNativePG Interface (CNPG-I):
+
+  - Implemented automatic reloading of TLS certificates for plugins when they
+    change. (#7029)
+
+  - Ensured the operator properly closes the plugin connection when
+    performing a backup using the plugin. (#7095, #7096)
+
+  - Fixed an issue that prevented WALs from being archived on a former
+    primary node when using a plugin. (#6964)
+
+  - Improved performance and resilience of CNPG-I by removing timeouts for local
+    plugin operations, avoiding failures during longer backup or WAL archiving
+    executions (#7496).
+
+- `cnpg` plugin:
+
+  - Increased the buffer size in the `logs pretty` command to better handle
+    larger log output (#7281).
+
+  - Ensured the `plugin-name` parameter is required for plugin-based backups
+    and disallowed for non-plugin backup methods (#7506).
+
+  - Ensured that the primary Pod is recreated during an imperative restart when
+    `primaryUpdateMethod` is set to `restart`, aligning its definition with the
+    replicas. (#7122)
+
+### Changes
+
+- Updated the default PostgreSQL version to 17.5 for new cluster
+  definitions. (#7556)
+
+- Updated the default PgBouncer version to **1.24.1** for new `Pooler`
+  deployments (#7399).
+
+### Supported versions
+
+- Kubernetes 1.33, 1.32, 1.31, and 1.30
+- PostgreSQL 17, 16, 15, 14, and 13
+  - PostgreSQL 17.5 is the default image
+  - PostgreSQL 13 support ends on November 12, 2025
diff --git a/docs/src/release_notes/v1.27.md b/docs/src/release_notes/v1.27.md
new file mode 100644
index 0000000000..e4db13a442
--- /dev/null
+++ b/docs/src/release_notes/v1.27.md
@@ -0,0 +1,105 @@
+# Release notes for CloudNativePG 1.27
+
+History of user-visible changes in the 1.27 minor release of CloudNativePG.
+
+For a complete list of changes, please refer to the
+[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.27)
+on the release branch in GitHub.
+
+
+
+## Version 1.27.0
+
+**Release date:** Aug 12, 2025
+
+### Important Changes
+
+- The default behavior of the [liveness probe](../instance_manager.md#liveness-probe) has been updated.
+  An [isolated primary is now forcibly shut down](../instance_manager.md#primary-isolation)
+  within the configured `livenessProbeTimeout` (default: 30 seconds).
+
+### Features
+
+- **Dynamic loading of PostgreSQL extensions**: Introduced the
+  `.spec.postgresql.extensions` stanza for mounting PostgreSQL extensions,
+  packaged as OCI-compliant container images, as read-only and immutable volumes
+  inside instance pods. 
This allows dynamic extension management without
+  rebuilding base images.
+  ([#7991](https://github.com/cloudnative-pg/cloudnative-pg/pull/7991)).
+
+- **Logical decoding slot synchronization in HA clusters**: Added the
+  `synchronizeLogicalDecoding` field under
+  `spec.replicationSlots.highAvailability` to enable automatic synchronization of
+  logical decoding slots across high-availability clusters, ensuring logical
+  replication subscribers continue seamlessly after a publisher failover
+  ([#7931](https://github.com/cloudnative-pg/cloudnative-pg/pull/7931)).
+
+- **Primary Isolation Check**: Promoted to stable the liveness pinger
+  experimental feature introduced in 1.26, adding the
+  `.spec.probes.liveness.isolationCheck` section to enable primary isolation
+  checks in the liveness probe by default. This improves the detection and
+  handling of primary connectivity issues in Kubernetes environments
+  ([#7845](https://github.com/cloudnative-pg/cloudnative-pg/pull/7845)).
+
+### Enhancements
+
+- Introduced an opt-in experimental feature that enables quorum-based failover
+  to improve safety and data durability during failover events. This feature,
+  also called failover quorum, can be activated via the
+  `alpha.cnpg.io/failoverQuorum` annotation
+  ([#7572](https://github.com/cloudnative-pg/cloudnative-pg/pull/7572)).
+
+- Added support for user maps for predefined users such as `streaming_replica`,
+  allowing the use of self-managed client certificates with different Common
+  Names in environments with strict policies or shared CAs, while still enabling
+  replicas to join clusters using the `streaming_replica` role
+  ([#7725](https://github.com/cloudnative-pg/cloudnative-pg/pull/7725)).
+
+- Added a new `PhaseFailurePlugin` phase in the `Cluster` status to improve
+  observability of plugin-related failures
+  ([#7988](https://github.com/cloudnative-pg/cloudnative-pg/pull/7988)).
+
+- Made the `Backup.spec` field immutable after creation, ensuring consistency
+  and predictability in backup operations
+  ([#7904](https://github.com/cloudnative-pg/cloudnative-pg/pull/7904)).
+
+- Added `fqdn-uri` and `fqdn-jdbc-uri` fields in the user secret to simplify
+  the retrieval of fully qualified domain name-based connection strings
+  ([#7852](https://github.com/cloudnative-pg/cloudnative-pg/pull/7852)).
+
+- CNPG-I:
+
+  - Added `Postgres` interface support to the CNPG-I operator, continuing the
+    transition toward a plugin-based architecture
+    ([#7179](https://github.com/cloudnative-pg/cloudnative-pg/pull/7179)).
+
+  - Added `metrics` capabilities to the CNPG-I instance webserver, enabling
+    metrics exposure directly from the instance for better observability
+    ([#8033](https://github.com/cloudnative-pg/cloudnative-pg/pull/8033)).
+
+### Fixes
+
+- Unblocked rollouts when migrating to the `barman-cloud` plugin using the
+  `switchover` strategy. Former primary Pods now restart correctly after WAL
+  archiving fails due to missing plugin support. 
+  ([#8236](https://github.com/cloudnative-pg/cloudnative-pg/pull/8236))
+
+### Supported versions
+
+- Kubernetes 1.33, 1.32, and 1.31
+- PostgreSQL 17, 16, 15, 14, and 13
+  - PostgreSQL 17.5 is the default image
+  - PostgreSQL 13 support ends on November 12, 2025
diff --git a/docs/src/replica_cluster.md b/docs/src/replica_cluster.md
index 630fe60bd0..13548b4530 100644
--- a/docs/src/replica_cluster.md
+++ b/docs/src/replica_cluster.md
@@ -1,4 +1,5 @@
 # Replica clusters
+
 
 A replica cluster is a CloudNativePG `Cluster` resource designed to
 replicate data from another PostgreSQL instance, ideally also managed by
@@ -88,8 +89,8 @@ recovery. There are three main options:
    seamless data transfer.
 2. **WAL Archive**: Use the WAL (Write-Ahead Logging) archive stored in an
    object store. WAL files are regularly transferred from the source cluster to
-   the object store, from where the `barman-cloud-wal-restore` utility retrieves
-   them for the replica cluster.
+   the object store, from where a CNPG-I plugin like [Barman Cloud](https://cloudnative-pg.io/plugin-barman-cloud/)
+   retrieves them for the replica cluster via the `restore_command`.
 3. **Hybrid Approach**: Combine both streaming replication and WAL archive
    methods. PostgreSQL can manage and switch between these two approaches as
    needed to ensure data consistency and availability.
@@ -98,11 +99,14 @@ recovery. There are three main options:
 
 When configuring the external cluster, you have the following options:
 
-- **`barmanObjectStore` section**:
-  - Enables use of the WAL archive, with CloudNativePG automatically setting
-    the `restore_command` in the designated primary instance.
-  - Allows bootstrapping the replica cluster from an object store using the
-    `recovery` section if volume snapshots are not feasible.
+- **`plugin` section**:
+  - Enables bootstrapping the replica cluster using a [CNPG-I](https://github.com/cloudnative-pg/cnpg-i)
+    plugin that supports the
+    [`restore_job`](https://github.com/cloudnative-pg/cnpg-i/blob/main/docs/protocol.md#restore_job-proto)
+    and the [`wal`](https://github.com/cloudnative-pg/cnpg-i/blob/main/docs/protocol.md#wal-proto) protocols.
+  - CloudNativePG supports the [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/docs/usage/#restoring-a-cluster)
+    to allow bootstrapping the replica cluster from an object store.
+
 - **`connectionParameters` section**:
   - Enables bootstrapping the replica cluster via streaming replication using
    the `pg_basebackup` section.
@@ -110,6 +114,13 @@ When configuring the external cluster, you have the following options:
   designated primary instance, initiating a WAL receiver process to connect
   to the source cluster and receive data.
 
+You still have access to the **`barmanObjectStore` section**, although deprecated:
+
+- Enables use of the WAL archive, with CloudNativePG automatically setting
+  the `restore_command` in the designated primary instance.
+- Allows bootstrapping the replica cluster from an object store using the
+  `recovery` section if volume snapshots are not feasible.
+
 ### Backup and Symmetric Architectures
 
 The replica cluster can perform backups to a reserved object store from the
@@ -162,9 +173,6 @@ continuous recovery are thoroughly explained below.
 
 ## Distributed Topology
 
-!!! Important
-    The Distributed Topology strategy was introduced in CloudNativePG 1.24.
-
 ### Planning for a Distributed PostgreSQL Database
 
 As Dwight Eisenhower famously said, "Planning is everything", and this holds
@@ -188,24 +196,33 @@ local object store. 
This object store is also accessible by the PostgreSQL
`Cluster` named `cluster-eu-central`, installed in the Central European
Kubernetes cluster. Initially, `cluster-eu-central` functions as a replica
cluster. Following a symmetric approach, it also has a local object store for
-continuous backup, which needs to be read by `cluster-eu-south`. The recovery
-in this setup relies solely on WAL shipping, with no streaming connection
-between the two clusters.
+continuous backup, which needs to be read by `cluster-eu-south`.
+
+In this example, recovery is performed solely through WAL shipping, without any
+streaming replication between the two clusters. However, you can configure the
+setup to use streaming replication alone or adopt a hybrid approach—streaming
+replication with WAL shipping as a fallback—as described in the
+[“Configuring replication”](replica_cluster.md#defining-an-external-cluster)
+section.
 
 Here’s how you would configure the `externalClusters` section for both
-`Cluster` resources:
+`Cluster` resources, relying on the Barman Cloud Plugin for the object store:
 
 ```yaml
 # Distributed topology configuration
 externalClusters:
   - name: cluster-eu-south
-    barmanObjectStore:
-      destinationPath: s3://cluster-eu-south/
-      # Additional configuration
+    plugin:
+      name: barman-cloud.cloudnative-pg.io
+      parameters:
+        barmanObjectName: cluster-eu-south
+        serverName: cluster-eu-south
   - name: cluster-eu-central
-    barmanObjectStore:
-      destinationPath: s3://cluster-eu-central/
-      # Additional configuration
+    plugin:
+      name: barman-cloud.cloudnative-pg.io
+      parameters:
+        barmanObjectName: cluster-eu-central
+        serverName: cluster-eu-central
 ```
 
 The `.spec.replica` stanza for the `cluster-eu-south` PostgreSQL primary
@@ -497,10 +514,12 @@ a backup of the source cluster has been created already.
 
 ```yaml
 externalClusters:
 - name: 
-  barmanObjectStore:
-    destinationPath: s3://backups/
-    endpointURL: http://minio:9000
-    s3Credentials:
+  # Example with Barman Cloud Plugin
+  plugin:
+    name: barman-cloud.cloudnative-pg.io
+    parameters:
+      barmanObjectName: 
+      serverName: 
 …
   connectionParameters:
     host: -rw.default.svc
diff --git a/docs/src/replication.md b/docs/src/replication.md
index a4f1cd93c5..c4eb01e48a 100644
--- a/docs/src/replication.md
+++ b/docs/src/replication.md
@@ -1,4 +1,5 @@
 # Replication
+
 
 Physical replication is one of the strengths of PostgreSQL and one of the
-reasons why some of the largest organizations in the world have chosen
-it for the management of their data in business continuity contexts.
-Primarily used to achieve high availability, physical replication also allows
-scale-out of read-only workloads and offloading of some work from the primary.
+reasons why some of the largest organizations in the world have chosen it for
+the management of their data in business continuity contexts. Primarily used to
+achieve high availability, physical replication also allows scale-out of
+read-only workloads and offloading of some work from the primary.
 
 !!! Important
     This section is about replication within the same `Cluster` resource
     managed in the same Kubernetes cluster. For information about how to
     replicate with another Postgres `Cluster` resource, even across different
-    Kubernetes clusters, please refer to the ["Replica clusters"](replica_cluster.md)
-    section.
+    Kubernetes clusters, please refer to the
+    ["Replica clusters"](replica_cluster.md) section. 
## Application-level replication -Having contributed throughout the years to the replication feature in PostgreSQL, -we have decided to build high availability in CloudNativePG on top of -the native physical replication technology, and integrate it -directly in the Kubernetes API. +Having contributed throughout the years to the replication feature in +PostgreSQL, we have decided to build high availability in CloudNativePG on top +of the native physical replication technology, and integrate it directly in the +Kubernetes API. -In Kubernetes terms, this is referred to as **application-level replication**, in -contrast with *storage-level replication*. +In Kubernetes terms, this is referred to as **application-level replication**, +in contrast with *storage-level replication*. ## A very mature technology PostgreSQL has a very robust and mature native framework for replicating data -from the primary instance to one or more replicas, built around the -concept of transactional changes continuously stored in the WAL (Write Ahead Log). +from the primary instance to one or more replicas, built around the concept of +transactional changes continuously stored in the WAL (Write Ahead Log). Started as the evolution of crash recovery and point in time recovery technologies, physical replication was first introduced in PostgreSQL 8.2 -(2006) through WAL shipping from the primary to a warm standby in -continuous recovery. +(2006) through WAL shipping from the primary to a warm standby in continuous +recovery. PostgreSQL 9.0 (2010) introduced WAL streaming and read-only replicas through *hot standby*. In 2011, PostgreSQL 9.1 brought synchronous replication at the -transaction level, supporting RPO=0 clusters. Cascading replication was added -in PostgreSQL 9.2 (2012). The foundations for logical replication were -established in PostgreSQL 9.4 (2014), and version 10 (2017) introduced native -support for the publisher/subscriber pattern to replicate data from an origin -to a destination. The table below summarizes these milestones. +transaction level, supporting [RPO](before_you_start.md#rpo)=0 clusters. Cascading +replication was added in PostgreSQL 9.2 (2012). The foundations for +[logical replication](logical_replication.md) were established in PostgreSQL +9.4 (2014), and version 10 (2017) introduced native support for the +publisher/subscriber pattern to replicate data from an origin to a destination. The +table below summarizes these milestones. | Version | Year | Feature | |:-------:|:----:|-----------------------------------------------------------------------| @@ -56,9 +58,9 @@ versions. 
## Streaming replication support -At the moment, CloudNativePG natively and transparently manages -physical streaming replicas within a cluster in a declarative way, based on -the number of provided `instances` in the `spec`: +At the moment, CloudNativePG natively and transparently manages physical +streaming replicas within a cluster in a declarative way, based on the number of +provided `instances` in the `spec`: ``` replicas = instances - 1 (where instances > 0) @@ -69,18 +71,18 @@ called `streaming_replica` as follows: ```sql CREATE USER streaming_replica WITH REPLICATION; - -- NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOBYPASSRLS +-- NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOBYPASSRLS ``` Out of the box, the operator automatically sets up streaming replication within the cluster over an encrypted channel and enforces TLS client certificate -authentication for the `streaming_replica` user - as highlighted by the following -excerpt taken from `pg_hba.conf`: +authentication for the `streaming_replica` user - as highlighted by the +following excerpt taken from `pg_hba.conf`: ``` # Require client certificate authentication for the streaming_replica user -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert +hostssl postgres streaming_replica all cert map=cnpg_streaming_replica +hostssl replication streaming_replica all cert map=cnpg_streaming_replica ``` !!! Seealso "Certificates" @@ -101,9 +103,9 @@ the primary's storage, even after a failover or switchover. ### Continuous backup integration In case continuous backup is configured in the cluster, CloudNativePG -transparently configures replicas to take advantage of `restore_command` when -in continuous recovery. As a result, PostgreSQL can use the WAL archive -as a fallback option whenever pulling WALs via streaming replication fails. +transparently configures replicas to take advantage of `restore_command` when in +continuous recovery. As a result, PostgreSQL can use the WAL archive as a +fallback option whenever pulling WALs via streaming replication fails. ## Synchronous Replication @@ -111,16 +113,24 @@ CloudNativePG supports both [quorum-based and priority-based synchronous replication for PostgreSQL](https://www.postgresql.org/docs/current/warm-standby.html#SYNCHRONOUS-REPLICATION). !!! Warning - Please be aware that synchronous replication will halt your write - operations if the required number of standby nodes to replicate WAL data for - transaction commits is unavailable. In such cases, write operations for your - applications will hang. This behavior differs from the previous implementation - in CloudNativePG but aligns with the expectations of a PostgreSQL DBA for this - capability. - -While direct configuration of the `synchronous_standby_names` option is -prohibited, CloudNativePG allows you to customize its content and extend -synchronous replication beyond the `Cluster` resource through the + By default, synchronous replication pauses write operations if the required + number of standby nodes for WAL replication during transaction commits is + unavailable. This behavior prioritizes data durability and aligns with + PostgreSQL DBA best practices. However, if self-healing is a higher priority + than strict data durability in your setup, this setting can be adjusted. For + details on managing this behavior, refer to the [Data Durability and Synchronous Replication](#data-durability-and-synchronous-replication) + section. + +!!! 
Important + The [*failover quorum* feature](failover.md#failover-quorum-quorum-based-failover) (experimental) + can be used alongside synchronous replication to improve data durability + and safety during failover events. + +Direct configuration of the `synchronous_standby_names` option is not +permitted. However, CloudNativePG automatically populates this option with the +names of local pods, while also allowing customization to extend synchronous +replication beyond the `Cluster` resource. +This can be achieved through the [`.spec.postgresql.synchronous` stanza](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-SynchronousReplicaConfiguration). Synchronous replication is disabled by default (the `synchronous` stanza is not @@ -132,17 +142,39 @@ defined). When defined, two options are mandatory: ### Quorum-based Synchronous Replication -PostgreSQL's quorum-based synchronous replication makes transaction commits -wait until their WAL records are replicated to at least a certain number of -standbys. To use this method, set `method` to `any`. +In PostgreSQL, quorum-based synchronous replication ensures that transaction +commits wait until their WAL records are replicated to a specified number of +standbys. To enable this, set the `method` to `any`. + +This replication method is the most common setup for a CloudNativePG cluster. + +#### Example + +The example below, based on a typical `cluster-example` configuration with +three instances, sets up quorum-based synchronous replication with at least one +instance: + +```yaml +postgresql: + synchronous: + method: any + number: 1 +``` + +With this configuration, CloudNativePG automatically sets the content of +`synchronous_standby_names` as follows: + +```console +ANY 1 (cluster-example-2, cluster-example-3, cluster-example-1) +``` -#### Migrating from the Deprecated Synchronous Replication Implementation +#### Migrating from Deprecated Synchronous Replication Implementation -This section provides instructions on migrating your existing quorum-based -synchronous replication, defined using the deprecated form, to the new and more -robust capability in CloudNativePG. +This section outlines how to migrate from the deprecated quorum-based +synchronous replication format to the newer, more robust implementation in +CloudNativePG. -Suppose you have the following manifest: +Given the following manifest: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -151,7 +183,6 @@ metadata: name: angus spec: instances: 3 - minSyncReplicas: 1 maxSyncReplicas: 1 @@ -159,7 +190,7 @@ spec: size: 1G ``` -You can convert it to the new quorum-based format as follows: +You can update it to the new format as follows: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -176,14 +207,11 @@ spec: synchronous: method: any number: 1 + dataDurability: required ``` -!!! Important - The primary difference with the new capability is that PostgreSQL will - always prioritize data durability over high availability. Consequently, if no - replica is available, write operations on the primary will be blocked. However, - this behavior is consistent with the expectations of a PostgreSQL DBA for this - capability. +To prioritize self-healing over strict data durability, set `dataDurability` +to `preferred` instead. ### Priority-based Synchronous Replication @@ -222,31 +250,16 @@ the PostgreSQL cluster. You can customize the content of !!! Warning You are responsible for ensuring the correct names in `standbyNamesPre` and - `standbyNamesPost`. 
CloudNativePG expects that you manage any standby with an - `application_name` listed here, ensuring their high availability. Incorrect - entries can jeopardize your PostgreSQL database uptime. + `standbyNamesPost`. CloudNativePG expects that you manage any standby with + an `application_name` listed here, ensuring their high availability. + Incorrect entries can jeopardize your PostgreSQL database uptime. -### Examples +#### Examples Here are some examples, all based on a `cluster-example` with three instances: If you set: -```yaml -postgresql: - synchronous: - method: any - number: 1 -``` - -The content of `synchronous_standby_names` will be: - -```console -ANY 1 (cluster-example-2, cluster-example-3) -``` - -If you set: - ```yaml postgresql: synchronous: @@ -302,14 +315,158 @@ The `synchronous_standby_names` option will look like: FIRST 2 (angus, cluster-example-2, malcolm) ``` +### Data Durability and Synchronous Replication + +The `dataDurability` option in the `.spec.postgresql.synchronous` stanza +controls the trade-off between data safety and availability for synchronous +replication. It can be set to `required` or `preferred`, with the default being +`required` if not specified. + +!!! Important + `preferred` can only be used when `standbyNamesPre` and `standbyNamesPost` + are unset. + +#### Required Data Durability + +When `dataDurability` is set to `required`, PostgreSQL only considers +transactions committed once WAL (Write-Ahead Log) records have been replicated +to the specified number of synchronous standbys. This setting prioritizes data +safety over availability, meaning write operations will pause if the required +number of synchronous standbys is unavailable. This ensures zero data loss +(RPO=0) but may reduce database availability during network disruptions or +standby failures. + +Synchronous standbys are selected in this priority order: + +1. Healthy instances +2. Unhealthy instances +3. Primary + +The list is then truncated based on `maxStandbyNamesFromCluster` if this value +is set, prioritizing healthy instances and ensuring `synchronous_standby_names` +is populated. + +##### Example + +Consider the following example: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: foo +spec: + instances: 3 + postgresql: + synchronous: + method: any + number: 1 + dataDurability: required +``` + +1. Initial state. The content of `synchronous_standby_names` is: + + ``` + ANY 1 ("foo-2","foo-3","foo-1") + ``` + +2. `foo-2` becomes unavailable. It gets pushed back in priority: + + ``` + ANY 1 ("foo-3","foo-2","foo-1") + ``` + +3. `foo-3` also becomes unavailable. The list contains no healthy standbys: + + ``` + ANY 1 ("foo-2","foo-3","foo-1") + ``` + + At this point no write operations will be allowed until at least one of the + standbys is available again. + +4. When the standbys are available again, `synchronous_standby_names` will + be back to the initial state. + +#### Preferred Data Durability + +When `dataDurability` is set to `preferred`, the required number of synchronous +instances adjusts based on the number of available standbys. PostgreSQL will +attempt to replicate WAL records to the designated number of synchronous +standbys, but write operations will continue even if fewer than the requested +number of standbys are available. + +!!! Important + Make sure you have a clear understanding of what *ready/available* means + for a replica and set your expectations accordingly. 
By default, a replica is + considered ready when it has successfully connected to the source at least + once. However, CloudNativePG allows you to configure startup and readiness + probes for replicas based on maximum lag. For more details, please refer to + the ["Postgres instance manager" section](instance_manager.md). + +This setting balances data safety with availability, enabling applications to +continue writing during temporary standby unavailability—hence, it’s also known +as *self-healing mode*. + +!!! Warning + This mode may result in data loss if all standbys become unavailable. + +With `preferred` data durability, **only healthy replicas** are included in +`synchronous_standby_names`. + +##### Example + +Consider the following example. For demonstration, we’ll use a cluster named +`bar` with 5 instances and 2 synchronous standbys: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: bar +spec: + instances: 5 + postgresql: + synchronous: + method: any + number: 2 + dataDurability: preferred +``` + +1. Initial state. The content of `synchronous_standby_names` is: + + ``` + ANY 2 ("bar-2","bar-3", "bar-4", "bar-5") + ``` + +2. `bar-2` and `bar-3` become unavailable. They are removed from the list: + + ``` + ANY 2 ("bar-4", "bar-5") + ``` + +3. `bar-4` also becomes unavailable. It gets removed from the list. Since the + number of available standbys is less than the requested number, the requested + amount gets reduced: + + ``` + ANY 1 ("bar-5") + ``` + +4. `bar-5` also becomes unavailable. `synchronous_standby_names` becomes empty, + disabling synchronous replication completely. Write operations will continue, + but with the risk of potential data loss in case of a primary failure. +5. When the replicas are back, `synchronous_standby_names` will be back to + the initial state. + ## Synchronous Replication (Deprecated) !!! Warning Prior to CloudNativePG 1.24, only the quorum-based synchronous replication - implementation was supported. Although this method is now deprecated, it will - not be removed anytime soon. - The new method prioritizes data durability over self-healing and offers - more robust features, including priority-based synchronous replication and full + implementation was supported. Although this method is now deprecated, it + will not be removed anytime soon. + The new method prioritizes data durability over self-healing and offers more + robust features, including priority-based synchronous replication and full control over the `synchronous_standby_names` option. It is recommended to gradually migrate to the new configuration method for synchronous replication, as explained in the previous paragraph. @@ -342,7 +499,7 @@ ANY q (pod1, pod2, ...) Where: -- `q` is an integer automatically calculated by the operator to be: +- `q` is an integer automatically calculated by the operator to be: `1 <= minSyncReplicas <= q <= maxSyncReplicas <= readyReplicas` - `pod1, pod2, ...` is the list of all PostgreSQL pods in the cluster @@ -380,12 +537,14 @@ Postgres pod are. legacy implementation of synchronous replication (see ["Synchronous Replication (Deprecated)"](replication.md#synchronous-replication-deprecated)). 

-As an example use-case for this feature: in a cluster with a single sync replica,
-we would be able to ensure the sync replica will be in a different availability
-zone from the primary instance, usually identified by the `topology.kubernetes.io/zone`
+As an example use-case for this feature: in a cluster with a single sync
+replica, we would be able to ensure the sync replica will be in a different
+availability zone from the primary instance, usually identified by
+the `topology.kubernetes.io/zone`
 [label on a node](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone).
-This would increase the robustness of the cluster in case of an outage in a single
-availability zone, especially in terms of recovery point objective (RPO).
+This would increase the robustness of the cluster in case of an outage in a
+single availability zone, especially in terms of recovery point objective
+([RPO](before_you_start.md#rpo)).
 
 The idea of anti-affinity is to ensure that sync replicas that participate in
 the quorum are chosen from pods running on nodes that have different values for
@@ -400,8 +559,8 @@ the replicas are eligible for synchronous replication.
 The example below shows how this can be done through the
 `syncReplicaElectionConstraint` section within `.spec.postgresql`.
 
-`nodeLabelsAntiAffinity` allows you to specify those node labels that need to
-be evaluated to make sure that synchronous replication will be dynamically
+`nodeLabelsAntiAffinity` allows you to specify those node labels that need to be
+evaluated to make sure that synchronous replication will be dynamically
 configured by the operator between the current primary and the replicas which
 are located on nodes having a value of the availability zone label different
 from that of the node where the primary is:
@@ -426,22 +585,24 @@ as storage, CPU, or memory.
 
 [Replication slots](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION-SLOTS)
 are a native PostgreSQL feature introduced in 9.4 that provides an automated way
 to ensure that the primary does not remove WAL segments until all the attached
-streaming replication clients have received them, and that the primary
-does not remove rows which could cause a recovery conflict even when the
-standby is (temporarily) disconnected.
+streaming replication clients have received them, and that the primary does not
+remove rows which could cause a recovery conflict even when the standby is
+(temporarily) disconnected.
 
 A replication slot exists solely on the instance that created it, and PostgreSQL
-does not replicate it on the standby servers. As a result, after a failover
-or a switchover, the new primary does not contain the replication slot from
-the old primary. This can create problems for the streaming replication clients
-that were connected to the old primary and have lost their slot.
+does not replicate it on the standby servers. As a result, after a failover or a
+switchover, the new primary does not contain the replication slot from the old
+primary. This can create problems for the streaming replication clients that
+were connected to the old primary and have lost their slot. 
CloudNativePG provides a turn-key solution to synchronize the content of
physical replication slots from the primary to each standby, addressing two use
cases:
 
- the replication slots automatically created for the High Availability of the
  Postgres cluster
  (see ["Replication slots for High Availability" below](#replication-slots-for-high-availability)
  for details)
- [user-defined replication slots](#user-defined-replication-slots) created on
  the primary
 
@@ -449,22 +610,22 @@ cases:
 
 CloudNativePG fills this gap by introducing the concept of cluster-managed
 replication slots, starting with high availability clusters. This feature
-automatically manages physical replication slots for each hot standby replica
-in the High Availability cluster, both in the primary and the standby.
+automatically manages physical replication slots for each hot standby replica in
+the High Availability cluster, both in the primary and the standby.
 
 In CloudNativePG, we use the terms:
 
 - **Primary HA slot**: a physical replication slot whose lifecycle is entirely
-  managed by the current primary of the cluster and whose purpose is to map to
-  a specific standby in streaming replication. Such a slot lives on the primary
+  managed by the current primary of the cluster and whose purpose is to map to a
+  specific standby in streaming replication. Such a slot lives on the primary
   only.
-- **Standby HA slot**: a physical replication slot for a standby whose
-  lifecycle is entirely managed by another standby in the cluster, based on the
-  content of the `pg_replication_slots` view in the primary, and updated at regular
+- **Standby HA slot**: a physical replication slot for a standby whose lifecycle
+  is entirely managed by another standby in the cluster, based on the content of
+  the `pg_replication_slots` view in the primary, and updated at regular
   intervals using `pg_replication_slot_advance()`.
 
-This feature is enabled by default and can be disabled via configuration.
-For details, please refer to the
+This feature is enabled by default and can be disabled via configuration. For
+details, please refer to the
 ["replicationSlots" section in the API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ReplicationSlotsConfiguration).
 
 Here follows a brief description of the main options:
@@ -472,24 +633,13 @@ Here follows a brief description of the main options:
 : if `true`, the feature is enabled (`true` is the default)
 
 `.spec.replicationSlots.highAvailability.slotPrefix`
-: the prefix that identifies replication slots managed by the operator
-  for this feature (default: `_cnpg_`)
+: the prefix that identifies replication slots managed by the operator for this
+  feature (default: `_cnpg_`)
 
 `.spec.replicationSlots.updateInterval`
 : how often the standby synchronizes the position of the local copy of the
-  replication slots with the position on the current primary, expressed in
-  seconds (default: 30)
-
-!!! Important
-    This capability requires PostgreSQL 11 or higher, as it relies on the
-    [`pg_replication_slot_advance()` administration function](https://www.postgresql.org/docs/current/functions-admin.html)
-    to directly manipulate the position of a replication slot.
-
-!!! 
Warning
-    In PostgreSQL 11, enabling replication slots if initially disabled, or conversely
-    disabling them if initially enabled, will require a rolling update of the
-    cluster (due to the presence of the `recovery.conf` file that is only read
-    at startup).
+  replication slots with the position on the current primary, expressed in
+  seconds (default: 30)
 
 Although it is not recommended, if you desire a different behavior, you can
 customize the above options.
@@ -589,20 +739,92 @@ spec:
     size: 1Gi
 ```
 
+### Logical Decoding Slot Synchronization
+
+CloudNativePG can synchronize logical decoding (replication) slots across all
+nodes in a high-availability cluster, ensuring seamless continuation of logical
+replication after a failover or switchover. This feature is disabled by
+default, and enabling it requires two steps.
+
+The first step is to enable logical decoding slot synchronization:
+
+```yaml
+  # ...
+  replicationSlots:
+    highAvailability:
+      synchronizeLogicalDecoding: true
+```
+
+The second step involves configuring PostgreSQL parameters: the required
+configuration depends on your PostgreSQL version, as explained below.
+
+When enabled, the operator automatically manages logical decoding slot states
+during failover and switchover, preventing slot invalidation and avoiding data
+loss for logical replication clients.
+
+#### Behavior on PostgreSQL 17 and later
+
+For PostgreSQL 17 and newer, CloudNativePG transparently manages the
+[`synchronized_standby_slots` parameter](https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-SYNCHRONIZED-STANDBY-SLOTS).
+
+You must enable both `sync_replication_slots` and `hot_standby_feedback` in
+your PostgreSQL configuration:
+
+```yaml
+# ...
+postgresql:
+  parameters:
+    # ...
+    hot_standby_feedback: 'on'
+    sync_replication_slots: 'on'
+```
+
+Additionally, you must create the logical replication `Subscription` with the
+`failover` option enabled, for example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+# ...
+spec:
+# ...
+  parameters:
+    failover: 'true'
+# ...
+```
+
+When configured, logical WAL sender processes send decoded changes to plugins
+only after the specified replication slots confirm receiving and flushing the
+relevant WAL, ensuring that:
+
+- logical replication slots do not consume changes until they are safely
+  received by the replicas of the publisher, and
+- logical replication clients can seamlessly reconnect to a promoted standby
+  without missing data after failover.
+
+For more details on logical replication slot synchronization, see the
+PostgreSQL documentation on [Logical Replication Failover](https://www.postgresql.org/docs/current/logical-replication-failover.html).
+
+#### Behavior on PostgreSQL 16 and earlier
+
+For PostgreSQL 16 and older versions, CloudNativePG uses the
+[`pg_failover_slots` extension](https://github.com/EnterpriseDB/pg_failover_slots)
+to maintain synchronization of logical replication slots across failovers.
+
 ### Capping the WAL size retained for replication slots
 
-When replication slots is enabled, you might end up running out of disk
-space due to PostgreSQL trying to retain WAL files requested by a replication
-slot. This might happen due to a standby that is (temporarily?) down, or
-lagging, or simply an orphan replication slot.
+When replication slots are enabled, you might end up running out of disk space
+due to PostgreSQL trying to retain WAL files requested by a replication slot.
+This might happen due to a standby that is (temporarily?) 
down, or lagging, or
+simply an orphan replication slot.
 
 Starting with PostgreSQL 13, you can take advantage of the
 [`max_slot_wal_keep_size`](https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-MAX-SLOT-WAL-KEEP-SIZE)
 configuration option controlling the maximum size of WAL files that replication
-slots are allowed to retain in the `pg_wal` directory at checkpoint time.
-By default, in PostgreSQL `max_slot_wal_keep_size` is set to `-1`, meaning that
-replication slots may retain an unlimited amount of WAL files.
-As a result, our recommendation is to explicitly set `max_slot_wal_keep_size`
+slots are allowed to retain in the `pg_wal` directory at checkpoint time. By
+default, in PostgreSQL `max_slot_wal_keep_size` is set to `-1`, meaning that
+replication slots may retain an unlimited amount of WAL files. As a result, our
+recommendation is to explicitly set `max_slot_wal_keep_size`
 when replication slots support is enabled. For example:
 
 ```ini
diff --git a/docs/src/resource_management.md b/docs/src/resource_management.md
index 483c2b9a3b..7301a1b8ad 100644
--- a/docs/src/resource_management.md
+++ b/docs/src/resource_management.md
@@ -1,4 +1,5 @@
 # Resource management
+
 In a typical Kubernetes cluster, pods run with unlimited resources. By default,
 they might be allowed to use as much CPU and RAM as needed.
@@ -39,13 +40,25 @@ section in the Kubernetes documentation.
 
 For a PostgreSQL workload it is recommended to set a "Guaranteed" QoS.
 
+!!! Info
+    When the quality of service is set to "Guaranteed", CloudNativePG sets the
+    `PG_OOM_ADJUST_VALUE` for the `postmaster` process to `0`, in line with the
+    [PostgreSQL documentation](https://www.postgresql.org/docs/current/kernel-resources.html#LINUX-MEMORY-OVERCOMMIT).
+    This allows the `postmaster` to retain its low Out-Of-Memory (OOM) score of
+    `-997`, while its child processes run with an OOM score adjustment of `0`. As a
+    result, if the OOM killer is triggered, it will terminate the child processes
+    before the `postmaster`. This behavior helps keep the PostgreSQL instance
+    alive for as long as possible and enables a clean shutdown procedure in the
+    event of an eviction.
+
 To avoid resources related issues in Kubernetes, we can refer to the best
 practices for "out of resource" handling while creating a cluster:
 
-- Specify your required values for memory and CPU in the resources section of the manifest file.
-  This way, you can avoid the `OOM Killed` (where "OOM" stands for Out Of Memory) and `CPU throttle` or any other
+- Specify your required values for memory and CPU in the resources section of
+  the manifest file.
+  This way, you can avoid the `OOM Killed` and `CPU throttle` or any other
   resource-related issues on running instances.
-- For your cluster's pods to get assigned to the "Guaranteed" QoS class, you must set limits and requests
+- For your cluster's pods to get assigned to the "Guaranteed" QoS class, you
+  must set limits and requests
   for both memory and CPU to the same value.
 - Specify your required PostgreSQL memory parameters consistently with the pod
   resources (as you would do in a VM or physical machine scenario - see below).
diff --git a/docs/src/rolling_update.md b/docs/src/rolling_update.md
index 01907ab416..1d04a1e2ee 100644
--- a/docs/src/rolling_update.md
+++ b/docs/src/rolling_update.md
@@ -1,4 +1,5 @@
 # Rolling Updates
+
 The operator allows changing the PostgreSQL version used in a cluster
 while applications are running against it. 
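+
+As a minimal sketch (using an illustrative three-instance cluster named
+`cluster-example`; the name, size, and image tag are placeholders), such a
+rolling update is triggered simply by bumping the operand image in the
+`Cluster` definition:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  # Changing the operand image (e.g. moving the tag from 17.4 to 17.5)
+  # makes the operator update the replicas one at a time, leaving the
+  # primary for last, according to the primaryUpdateMethod below
+  imageName: ghcr.io/cloudnative-pg/postgresql:17.5
+  primaryUpdateMethod: restart
+  storage:
+    size: 1Gi
+```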
@@ -57,8 +58,9 @@ The `primaryUpdateMethod` option accepts one of the following values: There's no one-size-fits-all configuration for the update method, as that depends on several factors like the actual workload of your database, the -requirements in terms of RPO and RTO, whether your PostgreSQL architecture is -shared or shared nothing, and so on. +requirements in terms of [RPO](before_you_start.md#rpo) and +[RTO](before_you_start.md#rto), whether your PostgreSQL architecture is shared +or shared nothing, and so on. Indeed, being PostgreSQL a primary/standby architecture database management system, the update process inevitably generates a downtime for your diff --git a/docs/src/samples.md b/docs/src/samples.md index 38a93256cc..1539e118f3 100644 --- a/docs/src/samples.md +++ b/docs/src/samples.md @@ -1,4 +1,5 @@ # Examples + The examples show configuration files for setting up your PostgreSQL cluster. @@ -62,12 +63,18 @@ your PostgreSQL cluster. -**Simple cluster with backup configured** +**Simple cluster with backup configured for minio** : *Prerequisites*: The configuration assumes minio is running and working. Update `backup.barmanObjectStore` with your minio parameters or your cloud solution. : [`cluster-example-with-backup.yaml`](samples/cluster-example-with-backup.yaml) A basic cluster with backups configured. +**Simple cluster with backup configured for Scaleway Object Storage** +: *Prerequisites*: The configuration assumes a Scaleway Object Storage bucket exists. + Update `backup.barmanObjectStore` with your Scaleway parameters. +: [`cluster-example-with-backup-scaleway.yaml`](samples/cluster-example-with-backup-scaleway.yaml) + A basic cluster with backups configured to work with Scaleway Object Storage. + ## Replica clusters **Replica cluster by way of backup from an object store** @@ -135,3 +142,70 @@ For a list of available options, see [API reference](cloudnative-pg.v1.md). **Pooler with custom service config** : [`pooler-external.yaml`](samples/pooler-external.yaml) + +## Logical replication via declarative Publication and Subscription objects + +Two test manifests contain everything needed to set up logical replication: + +**Source cluster with a publication** +: [`cluster-example-logical-source.yaml`](samples/cluster-example-logical-source.yaml) + +Sets up a cluster, `cluster-example`, with some tables created in the `app` +database, and, importantly, *grants the `replication` privilege to the `app` user*. +A publication is created for the cluster on the `app` database: note that the +publication will be reconciled only after the cluster's primary is up and +running. + +**Destination cluster with a subscription** +: *Prerequisites*: The source cluster with publication, defined as above. +: [`cluster-example-logical-destination.yaml`](samples/cluster-example-logical-destination.yaml) + +Sets up a cluster `cluster-example-dest` with: + +- the source cluster defined in the `externalClusters` stanza. Note that it uses + the `app` role to connect, which assumes the source cluster grants it + `replication` privilege. +- a bootstrap import of microservice type, with `schemaOnly` enabled + +A subscription is created on the destination cluster: note that the subscription +will be reconciled only after the destination cluster's primary is up and +running.
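+
+A quick, informal way to confirm that data is flowing is to compare row
+counts on both sides. The following is a minimal sketch that assumes the
+defaults used by these manifests: the `numbers` table in the `app` database,
+pods `cluster-example-1` and `cluster-example-dest-1` acting as the
+respective primaries, and the `postgres` container inside each pod:
+
+```shell
+# Row count on the source primary
+kubectl exec -ti cluster-example-1 -c postgres -- \
+  psql -d app -tc 'SELECT count(*) FROM numbers'
+
+# Once the subscription is active, the destination should converge to the
+# same count
+kubectl exec -ti cluster-example-dest-1 -c postgres -- \
+  psql -d app -tc 'SELECT count(*) FROM numbers'
+```
+
+Alternatively, you can open an interactive session on the destination with
+the [`cnpg` plugin](kubectl-plugin.md): `kubectl cnpg psql cluster-example-dest`.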
+ +After both clusters have been reconciled, together with the publication and +subscription objects, you can verify that the tables in the source cluster, +and the data in them, have been replicated to the destination cluster, for +example with a row-count check like the one sketched above. + +In addition, there are some standalone example manifests: + +**A plain Publication targeting All Tables** +: *Prerequisites*: an existing cluster `cluster-example`. +: [`publication-example.yaml`](samples/publication-example.yaml) + +**A Publication with a constrained publication target** +: *Prerequisites*: an existing cluster `cluster-example`. +: [`publication-example-objects.yaml`](samples/publication-example-objects.yaml) + +**A plain Subscription** +: *Prerequisites*: an existing cluster `cluster-example` set up as source, with + a publication `pub-all`. A cluster `cluster-example-dest` set up as a + destination cluster, including the `externalClusters` stanza with + connection parameters to the source cluster, including a role with + replication privilege. +: [`subscription-example.yaml`](samples/subscription-example.yaml) + +All the above manifests create publications or subscriptions on the `app` +database. The Database CRD offers a convenient way to create databases +declaratively. With it, logical replication could be set up for arbitrary +databases, which brings us to the next section. + +## Declarative management of Postgres databases + +**A plain Database** +: *Prerequisites*: an existing cluster `cluster-example`. +: [`database-example.yaml`](samples/database-example.yaml) + +**A Database with ICU locale specifications** +: *Prerequisites*: an existing cluster `cluster-example` running Postgres 16 + or newer. +: [`database-example-icu.yaml`](samples/database-example-icu.yaml) diff --git a/docs/src/samples/cluster-example-full.yaml b/docs/src/samples/cluster-example-full.yaml index 335c7ebfb7..f0237d6f79 100644 --- a/docs/src/samples/cluster-example-full.yaml +++ b/docs/src/samples/cluster-example-full.yaml @@ -35,7 +35,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: ghcr.io/cloudnative-pg/postgresql:16.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.5 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/docs/src/samples/cluster-example-initdb-icu.yaml b/docs/src/samples/cluster-example-initdb-icu.yaml new file mode 100644 index 0000000000..3e9747effe --- /dev/null +++ b/docs/src/samples/cluster-example-initdb-icu.yaml @@ -0,0 +1,19 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example-initdb-icu +spec: + instances: 3 + + bootstrap: + initdb: + encoding: UTF8 + localeCollate: en_US.UTF8 + localeCType: en_US.UTF8 + localeProvider: icu + icuLocale: en-US + # we want to order g and G after A (and before b) + icuRules: '&A < g <<< G' + + storage: + size: 1Gi diff --git a/docs/src/samples/cluster-example-logical-destination.yaml b/docs/src/samples/cluster-example-logical-destination.yaml index 75cb3f2af2..abaf006ecf 100644 --- a/docs/src/samples/cluster-example-logical-destination.yaml +++ b/docs/src/samples/cluster-example-logical-destination.yaml @@ -22,12 +22,22 @@ spec: - name: cluster-example connectionParameters: host: cluster-example-rw.default.svc - # We're using the superuser to allow the publication to be - # created directly when connected to the target server. - # See cluster-example-logical-source.yaml for more information - # about this.
- user: postgres + user: app dbname: app password: - name: cluster-example-superuser + name: cluster-example-app key: password +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: cluster-example-dest-sub +spec: + cluster: + name: cluster-example-dest + name: sub + dbname: app + publicationName: pub + externalClusterName: cluster-example + parameters: + failover: 'true' diff --git a/docs/src/samples/cluster-example-logical-source.yaml b/docs/src/samples/cluster-example-logical-source.yaml index ad9f888353..39eb64bdbc 100644 --- a/docs/src/samples/cluster-example-logical-source.yaml +++ b/docs/src/samples/cluster-example-logical-source.yaml @@ -3,9 +3,9 @@ kind: Cluster metadata: name: cluster-example spec: - instances: 1 + instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:13 + imageName: ghcr.io/cloudnative-pg/postgresql:17 storage: size: 1Gi @@ -15,18 +15,40 @@ spec: postInitApplicationSQL: - CREATE TABLE numbers (i SERIAL PRIMARY KEY, m INTEGER) - INSERT INTO numbers (m) (SELECT generate_series(1,10000)) - - ALTER TABLE numbers OWNER TO app; + - ALTER TABLE numbers OWNER TO app - CREATE TABLE numbers_two (i SERIAL PRIMARY KEY, m INTEGER) - INSERT INTO numbers_two (m) (SELECT generate_series(1,10000)) - - ALTER TABLE numbers_two OWNER TO app; - - CREATE TABLE numbers_three (i SERIAL PRIMARY KEY, m INTEGER) - - INSERT INTO numbers_three (m) (SELECT generate_series(1,10000)) - - ALTER TABLE numbers_three OWNER TO app; + - ALTER TABLE numbers_two OWNER TO app + - CREATE SCHEMA another_schema + - ALTER SCHEMA another_schema OWNER TO app + - CREATE TABLE another_schema.numbers_three (i SERIAL PRIMARY KEY, m INTEGER) + - INSERT INTO another_schema.numbers_three (m) (SELECT generate_series(1,10000)) + - ALTER TABLE another_schema.numbers_three OWNER TO app - enableSuperuserAccess: true + replicationSlots: + highAvailability: + synchronizeLogicalDecoding: true managed: roles: - name: app login: true replication: true + + postgresql: + parameters: + hot_standby_feedback: 'on' + sync_replication_slots: 'on' + +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: cluster-example-pub +spec: + name: pub + dbname: app + cluster: + name: cluster-example + target: + allTables: true diff --git a/docs/src/samples/cluster-example-syncreplicas-explicit.yaml b/docs/src/samples/cluster-example-syncreplicas-explicit.yaml new file mode 100644 index 0000000000..14ec8f2199 --- /dev/null +++ b/docs/src/samples/cluster-example-syncreplicas-explicit.yaml @@ -0,0 +1,14 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example-syncreplicas +spec: + instances: 5 + + postgresql: + synchronous: + method: first + number: 2 + + storage: + size: 1G diff --git a/docs/src/samples/cluster-example-syncreplicas.yaml b/docs/src/samples/cluster-example-syncreplicas-legacy.yaml similarity index 100% rename from docs/src/samples/cluster-example-syncreplicas.yaml rename to docs/src/samples/cluster-example-syncreplicas-legacy.yaml diff --git a/docs/src/samples/cluster-example-syncreplicas-quorum.yaml b/docs/src/samples/cluster-example-syncreplicas-quorum.yaml new file mode 100644 index 0000000000..ba3b6e9226 --- /dev/null +++ b/docs/src/samples/cluster-example-syncreplicas-quorum.yaml @@ -0,0 +1,16 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example + annotations: + alpha.cnpg.io/failoverQuorum: "true" +spec: + instances: 3 + + postgresql: + synchronous: + method: any + number: 1 + + storage: + size: 1G diff 
--git a/docs/src/samples/cluster-example-with-backup-scaleway.yaml b/docs/src/samples/cluster-example-with-backup-scaleway.yaml new file mode 100644 index 0000000000..b9f7905edb --- /dev/null +++ b/docs/src/samples/cluster-example-with-backup-scaleway.yaml @@ -0,0 +1,23 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: pg-backup-scaleway +spec: + instances: 3 + storage: + storageClass: standard + size: 1Gi + backup: + barmanObjectStore: + destinationPath: "s3:///backups/" # change with your bucket's name. + endpointURL: "https://s3..scw.cloud" # change with your bucket's location/region. + s3Credentials: + accessKeyId: + name: scaleway + key: ACCESS_KEY_ID + secretAccessKey: + name: scaleway + key: ACCESS_SECRET_KEY + region: + name: scaleway + key: ACCESS_REGION diff --git a/docs/src/samples/cluster-example-with-probes.yaml b/docs/src/samples/cluster-example-with-probes.yaml new file mode 100644 index 0000000000..dc2c1bb94c --- /dev/null +++ b/docs/src/samples/cluster-example-with-probes.yaml @@ -0,0 +1,16 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + + probes: + startup: + type: streaming + maximumLag: 16Mi + failureThreshold: 30 + periodSeconds: 10 + + storage: + size: 1Gi diff --git a/docs/src/samples/cluster-example.yaml b/docs/src/samples/cluster-example.yaml index fb331362f5..2ca0d8d71f 100644 --- a/docs/src/samples/cluster-example.yaml +++ b/docs/src/samples/cluster-example.yaml @@ -4,6 +4,7 @@ metadata: name: cluster-example spec: instances: 3 - + storage: size: 1Gi + diff --git a/docs/src/samples/cluster-import-snapshot-basicauth.yaml b/docs/src/samples/cluster-import-snapshot-basicauth.yaml index 967f23adba..5f6cf6e76b 100644 --- a/docs/src/samples/cluster-import-snapshot-basicauth.yaml +++ b/docs/src/samples/cluster-import-snapshot-basicauth.yaml @@ -13,6 +13,10 @@ spec: - app source: externalCluster: cluster-example + pgDumpExtraOptions: + - '--jobs=2' + pgRestoreExtraOptions: + - '--jobs=2' storage: size: 1Gi externalClusters: diff --git a/docs/src/samples/database-example-icu.yaml b/docs/src/samples/database-example-icu.yaml new file mode 100644 index 0000000000..fdfd367921 --- /dev/null +++ b/docs/src/samples/database-example-icu.yaml @@ -0,0 +1,16 @@ +# NOTE: this manifest will only work properly if the Postgres version supports +# ICU locales and rules (version 16 and newer) +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: db-icu +spec: + name: declarative-icu + owner: app + encoding: UTF8 + localeProvider: icu + icuLocale: en + icuRules: fr + template: template0 + cluster: + name: cluster-example diff --git a/docs/src/samples/k9s/plugins.yml b/docs/src/samples/k9s/plugins.yml index ca242960b5..b51ad07f01 100644 --- a/docs/src/samples/k9s/plugins.yml +++ b/docs/src/samples/k9s/plugins.yml @@ -6,6 +6,7 @@ # h View hibernate status # Shift-H Hibernate cluster (this retains the data, but deletes everything else - including the cluster) # l View cluster logs +# Shift-L View cluster logs pretty # p Connect to the cluster via psql # r Reload the cluster # Shift-R Restart the cluster @@ -26,7 +27,7 @@ plugins: background: false args: - -c - - "kubectl cnpg backup $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg backup $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-hibernate-status: shortCut: h description: Hibernate status @@ -36,7 +37,7 @@ plugins: background: false args: - -c - - "kubectl cnpg hibernate status $NAME -n $NAMESPACE 
--context \"$CONTEXT\" |& less -R" + - "kubectl cnpg hibernate status $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-hibernate: shortCut: Shift-H description: Hibernate @@ -47,7 +48,7 @@ plugins: background: false args: - -c - - "kubectl cnpg hibernate on $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg hibernate on $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-hibernate-off: shortCut: Shift-H description: Wake up hibernated cluster in this namespace @@ -58,7 +59,7 @@ plugins: background: false args: - -c - - "kubectl cnpg hibernate off $NAME -n $NAME --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg hibernate off $NAME -n $NAME --context \"$CONTEXT\" 2>&1 | less -R" cnpg-logs: shortCut: l description: Logs @@ -68,7 +69,17 @@ plugins: background: false args: - -c - - "kubectl cnpg logs cluster $NAME -f -n $NAMESPACE --context $CONTEXT" + - "kubectl cnpg logs cluster $NAME -f -n $NAMESPACE --context $CONTEXT" + cnpg-logs-pretty: + shortCut: Shift-L + description: Logs pretty + scopes: + - cluster + command: bash + background: false + args: + - -c + - "kubectl cnpg logs cluster $NAME -f -n $NAMESPACE --context $CONTEXT | kubectl cnpg logs pretty" cnpg-psql: shortCut: p description: PSQL shell @@ -89,7 +100,7 @@ plugins: background: false args: - -c - - "kubectl cnpg reload $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg reload $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-restart: shortCut: Shift-R description: Restart @@ -100,7 +111,7 @@ plugins: background: false args: - -c - - "kubectl cnpg restart $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg restart $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-status: shortCut: s description: Status @@ -110,7 +121,7 @@ plugins: background: false args: - -c - - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-status-verbose: shortCut: Shift-S description: Status (verbose) @@ -120,4 +131,4 @@ plugins: background: false args: - -c - - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" --verbose |& less -R" + - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" --verbose 2>&1 | less -R" \ No newline at end of file diff --git a/docs/src/samples/monitoring/alerts.yaml b/docs/src/samples/monitoring/alerts.yaml index a4b89b96fc..1fe4051708 100644 --- a/docs/src/samples/monitoring/alerts.yaml +++ b/docs/src/samples/monitoring/alerts.yaml @@ -21,10 +21,10 @@ groups: severity: warning - alert: PGDatabase annotations: - description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }} + description: Over 300,000,000 transactions from frozen xid on pod {{ $labels.pod }} summary: Number of transactions from the frozen XID to the current one expr: |- - cnpg_pg_database_xid_age > 150000000 + cnpg_pg_database_xid_age > 300000000 for: 1m labels: severity: warning diff --git a/docs/src/samples/monitoring/kube-stack-config.yaml b/docs/src/samples/monitoring/kube-stack-config.yaml index 68c0885fbb..af91202668 100644 --- a/docs/src/samples/monitoring/kube-stack-config.yaml +++ b/docs/src/samples/monitoring/kube-stack-config.yaml @@ -1,9 +1,6 @@ -# Default values for cnp-sandbox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
# -# -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# # -- here you can pass the whole values directly to the kube-prometheus-stack chart enabled: true diff --git a/docs/src/samples/monitoring/prometheusrule.yaml b/docs/src/samples/monitoring/prometheusrule.yaml index 3c72759637..eb344769af 100644 --- a/docs/src/samples/monitoring/prometheusrule.yaml +++ b/docs/src/samples/monitoring/prometheusrule.yaml @@ -26,10 +26,10 @@ spec: severity: warning - alert: PGDatabaseXidAge annotations: - description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }} + description: Over 300,000,000 transactions from frozen xid on pod {{ $labels.pod }} summary: Number of transactions from the frozen XID to the current one expr: |- - cnpg_pg_database_xid_age > 150000000 + cnpg_pg_database_xid_age > 300000000 for: 1m labels: severity: warning @@ -42,7 +42,7 @@ spec: for: 1m labels: severity: warning - - alert: LastFailedArchiveTime + - alert: LastFailedArchiveTime annotations: description: Archiving failed for {{ $labels.pod }} summary: Checks the last time archiving failed. Will be < 0 when it has not failed. @@ -51,7 +51,7 @@ spec: for: 1m labels: severity: warning - - alert: DatabaseDeadlockConflicts + - alert: DatabaseDeadlockConflicts annotations: description: There are over 10 deadlock conflicts in {{ $labels.pod }} summary: Checks the number of database conflicts diff --git a/docs/src/samples/pooler-tls.yaml b/docs/src/samples/pooler-tls.yaml index 9b58b2d364..20bffa1115 100644 --- a/docs/src/samples/pooler-tls.yaml +++ b/docs/src/samples/pooler-tls.yaml @@ -10,3 +10,5 @@ spec: type: rw pgbouncer: poolMode: session + parameters: + server_tls_protocols: tlsv1.3 diff --git a/docs/src/samples/postgis-example.yaml b/docs/src/samples/postgis-example.yaml index 6907c83e9c..34a3ff44b2 100644 --- a/docs/src/samples/postgis-example.yaml +++ b/docs/src/samples/postgis-example.yaml @@ -3,15 +3,25 @@ kind: Cluster metadata: name: postgis-example spec: - instances: 3 - imageName: ghcr.io/cloudnative-pg/postgis:14 - bootstrap: - initdb: - postInitTemplateSQL: - - CREATE EXTENSION postgis; - - CREATE EXTENSION postgis_topology; - - CREATE EXTENSION fuzzystrmatch; - - CREATE EXTENSION postgis_tiger_geocoder; - + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgis:17 storage: size: 1Gi + postgresql: + parameters: + log_statement: ddl +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: postgis-example-app +spec: + name: app + owner: app + cluster: + name: postgis-example + extensions: + - name: postgis + - name: postgis_topology + - name: fuzzystrmatch + - name: postgis_tiger_geocoder diff --git a/docs/src/samples/publication-example-objects.yaml b/docs/src/samples/publication-example-objects.yaml new file mode 100644 index 0000000000..2cc68a5296 --- /dev/null +++ b/docs/src/samples/publication-example-objects.yaml @@ -0,0 +1,16 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: publication-example-objects +spec: + cluster: + name: cluster-example + name: pub-objects + dbname: app + target: + objects: + - tablesInSchema: public + - table: + schema: 
another_schema + name: numbers_three + only: true diff --git a/docs/src/samples/publication-example.yaml b/docs/src/samples/publication-example.yaml new file mode 100644 index 0000000000..d2df4bc3f2 --- /dev/null +++ b/docs/src/samples/publication-example.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: publication-example +spec: + cluster: + name: cluster-example + name: pub-all + dbname: app + target: + allTables: true diff --git a/docs/src/samples/subscription-example.yaml b/docs/src/samples/subscription-example.yaml new file mode 100644 index 0000000000..6392d71830 --- /dev/null +++ b/docs/src/samples/subscription-example.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: subscription-sample +spec: + name: sub + dbname: app + publicationName: pub-all + cluster: + name: cluster-example-dest + externalClusterName: cluster-example diff --git a/docs/src/scheduling.md b/docs/src/scheduling.md index 046db44ec0..6b3f50fb86 100644 --- a/docs/src/scheduling.md +++ b/docs/src/scheduling.md @@ -1,4 +1,5 @@ # Scheduling + Scheduling, in Kubernetes, is the process responsible for placing a new pod on the best node possible, based on several criteria. @@ -40,7 +41,7 @@ metadata: name: cluster-example spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:16.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.5 affinity: enablePodAntiAffinity: true # Default value diff --git a/docs/src/security.md b/docs/src/security.md index 6eab222826..7b993c94e5 100644 --- a/docs/src/security.md +++ b/docs/src/security.md @@ -1,4 +1,5 @@ # Security + This section contains information about security for CloudNativePG, that are analyzed at 3 different layers: Code, Container and Cluster. @@ -58,20 +59,78 @@ please use this medium to report it. ## Container Every container image in CloudNativePG is automatically built via CI/CD -pipelines following every commit. These images include not only the operator's +pipelines after every commit. These images include not only the operator's image but also the operands' images, specifically for every supported -PostgreSQL version. During the CI/CD process, images undergo scanning with the -following tools: +PostgreSQL version. + +!!! Important + All operand images are automatically and regularly rebuilt by our pipelines + to incorporate the latest security updates at both the base image and package + levels. This ensures that container images distributed to the community receive + **patch-level updates** regularly. + +During the CI/CD process, images are scanned using the following tools: - **[Dockle](https://github.com/goodwithtech/dockle):** Ensures best practices in the container build process. - **[Snyk](https://snyk.io/):** Detects security issues within the container and reports findings via the GitHub interface. -!!! Important - All operand images are automatically rebuilt daily by our pipelines to - incorporate security updates at the base image and package level, providing - **patch-level updates** for the container images distributed to the community. +### Image Signatures + +The operator and [operand +images](https://github.com/cloudnative-pg/postgres-containers) are +cryptographically signed using [cosign](https://github.com/sigstore/cosign), a +signature tool from [sigstore](https://www.sigstore.dev/). 
+This process is automated via GitHub Actions and leverages +[short-lived tokens issued through OpenID Connect](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/about-security-hardening-with-openid-connect). + +The token issuer is `https://token.actions.githubusercontent.com`, and the +signing identity corresponds to a GitHub workflow executed under the +[cloudnative-pg](https://github.com/cloudnative-pg/cloudnative-pg/) repository. +This workflow uses the [cosign-installer action](https://github.com/marketplace/actions/cosign-installer) +to streamline the signing process. + +To verify the authenticity of an operator image, use the following `cosign` +command with the image digest: + +```shell +cosign verify ghcr.io/cloudnative-pg/cloudnative-pg@sha256: \ + --certificate-identity-regexp="^https://github.com/cloudnative-pg/cloudnative-pg/" \ + --certificate-oidc-issuer="https://token.actions.githubusercontent.com" +``` + +### Attestations + +Container images include the following attestations for transparency and +traceability: + +- **[Software Bill of Materials + (SBOM)](https://docs.docker.com/build/metadata/attestations/sbom/):** A + comprehensive list of software artifacts included in the image or used during + its build process, formatted using the + [in-toto SPDX predicate standard](https://github.com/in-toto/attestation/blob/main/spec/predicates/spdx.md). +- **[Provenance](https://docs.docker.com/build/metadata/attestations/slsa-provenance/):** + Metadata detailing how the image was built, following the [SLSA Provenance](https://slsa.dev) + framework. + +You can retrieve the SBOM for a specific image and platform using the following +command: + +```shell +docker buildx imagetools inspect \ + --format '{{ json (index .SBOM "").SPDX }}' +``` + +This command outputs the SBOM in JSON format, providing a detailed view of the +software components and build dependencies. + +For the provenance, use: + +```shell +docker buildx imagetools inspect \ + --format '{{ json (index .Provenance "").SLSA }}' +``` ### Guidelines and Frameworks for Container Security @@ -113,8 +172,8 @@ more about these roles, you can use the `kubectl describe clusterrole` or The above permissions are exclusively reserved for the operator's service account to interact with the Kubernetes API server. They are not directly accessible by the users of the operator that interact only with `Cluster`, - `Pooler`, `Backup`, `ScheduledBackup`, `ImageCatalog` and - `ClusterImageCatalog` resources. + `Pooler`, `Backup`, `ScheduledBackup`, `Database`, `Publication`, + `Subscription`, `ImageCatalog` and `ClusterImageCatalog` resources. Below we provide some examples and, most importantly, the reasons why CloudNativePG requires full or partial management of standard Kubernetes @@ -293,28 +352,49 @@ CloudNativePG. : The instance manager requires to `update` and `patch` the status of any `Backup` resource in the namespace -### Pod Security Policies +### Pod and Container Security Contexts -!!! Important - Starting from Kubernetes v1.21, the use of `PodSecurityPolicy` has been - deprecated, and as of Kubernetes v1.25, it has been completely removed. Despite - this deprecation, we acknowledge that the operator is currently undergoing - testing in older and unsupported versions of Kubernetes. Therefore, this - section is retained for those specific scenarios. 
+A [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +defines privilege and access control settings for a pod or container. + +CloudNativePG does not require *privileged* mode for container execution. +The PostgreSQL containers run as the `postgres` system user. No component +whatsoever requires running as `root`. -A [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) -is the Kubernetes way to define security rules and specifications that a pod needs to meet -to run in a cluster. -For InfoSec reasons, every Kubernetes platform should implement them. +Likewise, Volume access does not require *privileged* mode or `root` +privileges. Proper permissions must be assigned by the Kubernetes platform +and/or administrators. The PostgreSQL containers run with a read-only root +filesystem (i.e. no writable layer). -CloudNativePG does not require *privileged* mode for containers execution. -The PostgreSQL containers run as `postgres` system user. No component whatsoever requires running as `root`. +The operator manages the setting of security contexts for all pods and +containers of a PostgreSQL cluster. The [Seccomp Profile](https://kubernetes.io/docs/tutorials/security/seccomp/) +to be used for the PostgreSQL containers can be configured with the +`spec.seccompProfile` section of the `Cluster` resource. If this section is left +blank, the containers will use a seccompProfile `Type` of `RuntimeDefault`, that +is, the container runtime default. + +The security context of PostgreSQL containers using the default `seccompProfile` +will look like this: +```yaml +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault +``` -Likewise, Volumes access does not require *privileges* mode or `root` privileges either. -Proper permissions must be properly assigned by the Kubernetes platform and/or administrators. -The PostgreSQL containers run with a read-only root filesystem (i.e. no writable layer). +#### Security Context Constraints -The operator explicitly sets the required security contexts. +When running in an environment that uses +[Security Context Constraints (SCC)](https://docs.openshift.com/container-platform/4.17/authentication/managing-security-context-constraints.html), +the operator does not explicitly set the security context of the PostgreSQL +cluster pods, but rather allows the pods to inherit the restricted Security +Context Constraints that are already defined. ### Restricting Pod access using AppArmor diff --git a/docs/src/service_management.md b/docs/src/service_management.md index e39357bd20..c9d815e9bb 100644 --- a/docs/src/service_management.md +++ b/docs/src/service_management.md @@ -1,4 +1,5 @@ # Service Management + A PostgreSQL cluster should only be accessed via standard Kubernetes network services directly managed by CloudNativePG. For more details, refer to the @@ -82,11 +83,11 @@ field, as it is managed by the operator. The `updateStrategy` field allows you to control how the operator updates a service definition. By default, the operator uses the `patch` strategy, applying changes directly to the service. -Alternatively, the `recreate` strategy deletes the existing service and +Alternatively, the `replace` strategy deletes the existing service and recreates it from the template. !!!
Warning - The `recreate` strategy will cause a service disruption with every + The `replace` strategy will cause a service disruption with every change. However, it may be necessary for modifying certain parameters that can only be set during service creation. diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index f26fd19329..34a960a7a2 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -1,4 +1,5 @@ # Client TLS/SSL connections + !!! Seealso "Certificates" See [Certificates](certificates.md) @@ -90,7 +91,7 @@ spec: app: webtest spec: containers: - - image: ghcr.io/cloudnative-pg/webtest:1.6.0 + - image: ghcr.io/cloudnative-pg/webtest:1.7.0 name: cert-test volumeMounts: - name: secret-volume-root-ca @@ -173,7 +174,7 @@ Output: version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 16.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 17.5 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` @@ -184,12 +185,7 @@ By default, the operator sets both [`ssl_min_protocol_version`](https://www.post and [`ssl_max_protocol_version`](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-SSL-MAX-PROTOCOL-VERSION) to `TLSv1.3`. -!!! Important - In PostgreSQL 11, these two GUCs don't exist. Hence, in these specific versions - these values aren't set, and the default values are used. - This assumes that the PostgreSQL operand images include an OpenSSL library that supports the `TLSv1.3` version. If not, or if your client applications need a lower version number, you need to manually configure it in the PostgreSQL configuration as any other Postgres GUC. - diff --git a/docs/src/storage.md b/docs/src/storage.md index 5c56e6459c..cacf8644ac 100644 --- a/docs/src/storage.md +++ b/docs/src/storage.md @@ -1,4 +1,5 @@ # Storage + Storage is the most critical component in a database workload. Storage must always be available, scale, perform well, @@ -258,110 +259,6 @@ doesn't support that, you must delete the pod to trigger the resize. The best way to proceed is to delete one pod at a time, starting from replicas and waiting for each pod to be back up. -### Expanding PVC volumes on AKS - -Currently, [Azure can resize the PVC's volume without restarting the pod only on specific regions](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi#resize-a-persistent-volume-without-downtime). -CloudNativePG has overcome this limitation through the -`ENABLE_AZURE_PVC_UPDATES` environment variable in the -[operator configuration](operator_conf.md#available-options). -When set to `true`, CloudNativePG triggers a rolling update of the -Postgres cluster. - -Alternatively, you can use the following workaround to manually resize the -volume in AKS. - -#### Workaround for volume expansion on AKS - -You can manually resize a PVC on AKS. As an example, suppose you have a cluster -with three replicas: - -``` -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -cluster-example-1 1/1 Running 0 2m37s -cluster-example-2 1/1 Running 0 2m22s -cluster-example-3 1/1 Running 0 2m10s -``` - -An Azure disk can be expanded only while in "unattached" state, as described in the -[Kubernetes documentation](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/known-issues/sizegrow.md). 
-This means that, to resize a disk used by a PostgreSQL cluster, you need to -perform a manual rollout, first cordoning the node that hosts the pod using the -PVC bound to the disk. This prevents the operator from re-creating the pod and -immediately reattaching it to its PVC before the background disk resizing is -complete. - -First, edit the cluster definition, applying the new size. In this example, the -new size is `2Gi`. - -``` -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -metadata: - name: cluster-example -spec: - instances: 3 - - storage: - storageClass: default - size: 2Gi -``` - -Assuming the `cluster-example-1` pod is the cluster's primary, you can proceed -with the replicas first. For example, start with cordoning the Kubernetes node -that hosts the `cluster-example-3` pod: - -``` -kubectl cordon -``` - -Then delete the `cluster-example-3` pod: - -``` -$ kubectl delete pod/cluster-example-3 -``` - -Run the following command: - -``` -kubectl get pvc -w -o=jsonpath='{.status.conditions[].message}' cluster-example-3 -``` - -Wait until you see the following output: - -``` -Waiting for user to (re-)start a Pod to finish file system resize of volume on node. -``` - -Then, you can uncordon the node: - -``` -kubectl uncordon -``` - -Wait for the pod to be re-created correctly and get in a "Running and Ready" state: - -``` -kubectl get pods -w cluster-example-3 -cluster-example-3 0/1 Init:0/1 0 12m -cluster-example-3 1/1 Running 0 12m -``` - -Verify the PVC expansion by running the following command, which returns `2Gi` -as configured: - -``` -kubectl get pvc cluster-example-3 -o=jsonpath='{.status.capacity.storage}' -``` - -You can repeat these steps for the remaining pods. - -!!! Important - Leave the resizing of the disk associated with the primary instance as the - last disk, after promoting through a switchover a new resized pod, using - `kubectl cnpg promote`. For example, use `kubectl cnpg promote cluster-example 3` - to promote `cluster-example-3` to primary. - ### Re-creating storage If the storage class doesn't support volume expansion, you can still regenerate diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 5aee05fb84..49c0940c8b 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -1,4 +1,5 @@ # Supported releases + @@ -7,7 +8,7 @@ releases of CloudNativePG*. We are committed to providing support for the latest minor release, with a -dedication to launching a new minor release every two months. Each release +dedication to launching a new minor release every three months. Each release remains fully supported until reaching its designated "End of Life" date, as outlined in the [support status table for CloudNativePG releases](#support-status-of-cloudnativepg-releases). This includes an additional 3-month assistance window to facilitate seamless @@ -79,19 +80,27 @@ Git tags for versions are prefixed with `v`. 
## Support status of CloudNativePG releases -| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | -|-----------------|----------------------|-------------------|---------------------|-------------------------------|---------------------------|-----------------------------| -| 1.24.x | Yes | August 22, 2024 | ~ February, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 12 - 16 | -| 1.23.x | Yes | April 24, 2024 | ~ November, 2024 | 1.27, 1.28, 1.29 | 1.30, 1.31 | 12 - 16 | -| main | No, development only | | | | | 12 - 16 | + +| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | +|-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------| +| 1.27.x | Yes | Aug 12, 2025 | ~ Feb, 2026 | 1.31, 1.32, 1.33 | 1.29, 1.30 | 13 - 17 | +| 1.26.x | Yes | May 23, 2025 | Nov 12, 2025 | 1.30, 1.31, 1.32, 1.33 | 1.29 | 13 - 17 | +| 1.25.x | Yes | Dec 23, 2024 | Aug 22, 2025 | 1.29, 1.30, 1.31, 1.32 | 1.33 | 13 - 17 | +| main | No, development only | | | | | 13 - 17 | + +1 _PostgreSQL 13 will be supported until November 13, 2025._ The list of supported Kubernetes versions in the table depends on what the CloudNativePG maintainers think is reasonable to support and to test. -At the moment, the CloudNativePG community doesn't support or test any -additional Kubernetes distribution, like Red Hat OpenShift. This might change -in the future and, in that case, that would be reflected in an official policy -written by the CloudNativePG maintainers. +Currently, the CloudNativePG community does not officially support or test any +Kubernetes distributions beyond the standard/vanilla one, such as Red Hat +OpenShift. This may change in the future, and if it does, the CloudNativePG +maintainers will update the official policy accordingly. + +If you plan to deploy CloudNativePG on Red Hat OpenShift, you can use the +[certified operator provided by EDB](https://catalog.redhat.com/software/container-stacks/detail/653fd4035eece8598f66d97b), +which comes with full support from EDB. ### Supported PostgreSQL versions @@ -112,11 +121,11 @@ version of PostgreSQL, we might not be able to help you. ## Upcoming releases -| Version | Release date | End of life | -|-----------------|-----------------------|---------------------------| -| 1.25.0 | Nov/Dec, 2024 | May/Jun, 2025 | -| 1.26.0 | Mar, 2025 | Aug/Sep, 2025 | -| 1.27.0 | Jun, 2025 | Dec, 2025 | +| Version | Release date | End of life | +|---------|--------------|-------------| +| 1.28.0 | ~ Nov, 2025 | ~ May, 2026 | +| 1.29.0 | ~ Feb, 2026 | ~ Aug, 2026 | +| 1.30.0 | ~ May, 2026 | ~ Nov, 2026 | !!! Note Feature freeze occurs 1-2 weeks before the release, at which point a @@ -133,6 +142,8 @@ version of PostgreSQL, we might not be able to help you.
| Version | Release date | End of life | Compatible Kubernetes versions | |-----------------|-------------------|---------------------|--------------------------------| +| 1.24.x | Aug 22, 2024 | May 23, 2025 | 1.28, 1.29, 1.30, 1.31 | +| 1.23.x | April 24, 2024 | November 24, 2024 | 1.27, 1.28, 1.29 | | 1.22.x | December 21, 2023 | July 24, 2024 | 1.26, 1.27, 1.28 | | 1.21.x | October 12, 2023 | Jun 12, 2024 | 1.25, 1.26, 1.27, 1.28 | | 1.20.x | April 27, 2023 | January 21, 2024 | 1.24, 1.25, 1.26, 1.27 | @@ -144,39 +155,43 @@ version of PostgreSQL, we might not be able to help you. ## What we mean by support -Our support window is roughly five months for each release branch (latest +Our support window is roughly five/six months for each release branch (latest minor release, plus 3 additional months), given that we produce a new final -release every two months. +release every two/three months. -In the following diagram, `release-1.23` is an example of a release branch. +In the following diagram, `release-1.27` is an example of a release branch. -For example, if the latest release is `v1.23.0`, you can expect a supplementary -3-month support period for the preceding release, `v1.22.x`. +For example, if the latest release is `v1.27.0`, you can expect a supplementary +3-month support period for the preceding release, `v1.26.x`. -Only the last patch release of each branch is supported. +**Only the last patch release of each branch is supported.** ```diagram ------+---------------------------------------------> main (trunk development) \ \ \ \ - \ \ v1.23.0 - \ \ Apr 24, 2024 ^ - \ \----------+---------------> release-1.23 | + \ \ v1.27.0 + \ \ Aug 12, 2025 ^ + \ \----------+---------------> release-1.27 | \ | SUPPORTED \ | RELEASES - \ v1.22.0 | = last minor - \ Dec 21, 2023 | release + - +-------------------+---------------> release-1.22 | 3 months + \ v1.26.0 | = last minor + \ May 23, 2025 | release + + +-------------------+---------------> release-1.26 | 3 months v ``` We offer two types of support: Technical support -: Technical assistance is offered on a best-effort basis for supported - releases only. You can request support from the community on the - [CloudNativePG Slack](https://cloudnativepg.slack.com/) (in the `#general` channel), - or using [GitHub Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions). +: Technical assistance is offered on a best-effort basis and is limited to + supported releases only. For help, you can reach out to the community via the + [#cloudnativepg-users](https://cloud-native.slack.com/archives/C08MAUJ7NPM) + channel on the CNCF Slack workspace (if you're not yet a member, you can + [join the workspace](https://communityinviter.com/apps/cloud-native/cncf)). + Alternatively, you can post your questions in + the [GitHub Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions) + section of the CloudNativePG repository. Security and bug fixes : We backport important bug fixes — including security fixes - to all @@ -184,7 +199,7 @@ Security and bug fixes *"Does this backport improve `CloudNativePG`, bearing in mind that we really value stability for already-released versions?"* -If you're looking for professional support, see the -[Support page in the website](https://cloudnative-pg.io/support/). -The vendors listed there might provide service level agreements that included -extended support timeframes. 
+If you’re looking for professional support, please refer to the +[Support page on our website](https://cloudnative-pg.io/support/). +The vendors listed there may offer service level agreements (SLA), including +extended support periods and additional services. diff --git a/docs/src/tablespaces.md b/docs/src/tablespaces.md index 0fcf775d4e..48538995f5 100644 --- a/docs/src/tablespaces.md +++ b/docs/src/tablespaces.md @@ -1,4 +1,5 @@ # Tablespaces + A tablespace is a robust and widely embraced feature in database management systems. It offers a powerful means to enhance the vertical @@ -146,11 +147,13 @@ spec: size: 10Gi tablespaces: - name: current - size: 100Gi - storageClass: fastest + storage: + size: 100Gi + storageClass: fastest - name: this_year - size: 500Gi - storageClass: balanced + storage: + size: 500Gi + storageClass: balanced ``` The `yardbirds` cluster example requests 4 persistent volume claims using @@ -251,6 +254,10 @@ tablespace map) both on object stores and volume snapshots. backup. The lag will be resolved in a maximum of 5 minutes, with the next reconciliation. +!!! Warning + When you add or remove a tablespace in an existing cluster, recovery + from WAL will fail until you take a new base backup. + Once a cluster with tablespaces has a base backup, you can restore a new cluster from it. When it comes to the recovery side, it's your responsibility to ensure that the `Cluster` definition of the recovered diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index df9dc94940..5aa521235f 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -1,4 +1,5 @@ # Troubleshooting + In this page, you can find some basic information on how to troubleshoot CloudNativePG in your Kubernetes cluster deployment. @@ -59,17 +60,6 @@ Please refer to the [plugin document](kubectl-plugin.md) for complete instructio After getting the cluster manifest with the plugin, you should verify if backups are set up and working. -In a cluster with backups set up, you will find, in the cluster Status, the fields -`lastSuccessfulBackup` and `firstRecoverabilityPoint`. You should make sure -there is a recent `lastSuccessfulBackup`. - -A cluster lacking the `.spec.backup` stanza won't have backups. -An insistent message will appear in the PostgreSQL logs: - -``` -Backup not configured, skip WAL archiving. -``` - Before proceeding with troubleshooting operations, it may be advisable to perform an emergency backup depending on your findings regarding backups. Refer to the following section for instructions. @@ -132,25 +122,27 @@ The above steps might be integrated into the `cnpg` plugin at some stage in the ## Logs -Every resource created and controlled by CloudNativePG logs to -standard output, as expected by Kubernetes, and directly in [JSON -format](logging.md). As a result, you should rely on the `kubectl logs` -command to retrieve logs from a given resource. - -For more information, type: +All resources created and managed by CloudNativePG log to standard output in +accordance with Kubernetes conventions, using [JSON format](logging.md). -```shell -kubectl logs --help -``` +While logs are typically processed at the infrastructure level and include +those from CloudNativePG, accessing logs directly from the command line +interface is critical during troubleshooting. You have three primary options +for doing so: -!!! Hint - JSON logs are great for machine reading, but hard to read for human beings. 
- Our recommendation is to use the `jq` command to improve usability. For - example, you can *pipe* the `kubectl logs` command with `| jq -C`. +- Use the `kubectl logs` command to retrieve logs from a specific resource, and + apply `jq` for better readability. +- Use the [`kubectl cnpg logs` command](kubectl-plugin.md#logs) for + CloudNativePG-specific logging. +- Leverage specialized open-source tools like + [stern](https://github.com/stern/stern), which can aggregate logs from + multiple resources (e.g., all pods in a PostgreSQL cluster by selecting the + `cnpg.io/clusterName` label), filter log entries, customize output formats, + and more. !!! Note - In the sections below, we will show some examples on how to retrieve logs - about different resources when it comes to troubleshooting CloudNativePG. + The following sections provide examples of how to retrieve logs for various + resources when troubleshooting CloudNativePG. ## Operator information @@ -218,7 +210,7 @@ Cluster in healthy state Name: cluster-example Namespace: default System ID: 7044925089871458324 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4-3 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.5-3 Primary instance: cluster-example-1 Instances: 3 Ready instances: 3 @@ -277,14 +269,6 @@ kubectl cnpg status -n !!! Tip You can print more information by adding the `--verbose` option. -!!! Note - Besides knowing cluster status, you can also do the following things with the cnpg plugin: - Promote a replica.
- Manage certificates.
- Make a rollout restart cluster to apply configuration changes.
- Make a reconciliation loop to reload and apply configuration changes.
- For more information, please see [`cnpg` plugin](kubectl-plugin.md) documentation. - Get PostgreSQL container image version: ```shell @@ -294,7 +278,7 @@ kubectl describe cluster -n | grep "Image Name" Output: ```shell - Image Name: ghcr.io/cloudnative-pg/postgresql:16.4-3 + Image Name: ghcr.io/cloudnative-pg/postgresql:17.5-3 ``` !!! Note @@ -800,3 +784,27 @@ API. Please check your networking. Another possible cause is when you have sidecar injection configured. Sidecars such as Istio may make the network temporarily unavailable during startup. If you have sidecar injection enabled, retry with injection disabled. + +### Replicas take over two minutes to reconnect after a failover + +When the primary instance fails, the operator promotes the most advanced standby +to the primary role. Other standby instances then attempt to reconnect to the +`-rw` service for replication. However, during this reconnection process, +`kube-proxy` may not have updated its routing information yet. As a result, the +initial `SYN` packet sent by the standby instances might fail to reach its +intended destination. + +If the network is configured to silently drop packets instead of rejecting them, +standby instances will not receive a response and will retry the connection +after an exponential backoff period. On Linux systems, the default value for the +`tcp_syn_retries` kernel parameter is 6, meaning the system will attempt to +establish the connection for approximately 127 seconds before giving up. This +prolonged retry period can significantly delay the reconnection process. +For more details, consult the +[tcp_syn_retries documentation](https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt). + +You can work around this issue by setting `STANDBY_TCP_USER_TIMEOUT` in the +[operator configuration](operator_conf.md#available-options). This will cause +the standby instances to close the TCP connection if the initial `SYN` packet +is not acknowledged within the specified timeout, allowing them to retry the +connection more quickly. diff --git a/docs/src/use_cases.md b/docs/src/use_cases.md index 24b92584e7..d0369b88e5 100644 --- a/docs/src/use_cases.md +++ b/docs/src/use_cases.md @@ -1,4 +1,5 @@ # Use cases + CloudNativePG has been designed to work with applications that reside in the same Kubernetes cluster, for a full cloud native diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md index bc67b13757..b82bfa2c5d 100644 --- a/docs/src/wal_archiving.md +++ b/docs/src/wal_archiving.md @@ -1,76 +1,51 @@ # WAL archiving + -WAL archiving is the process that feeds a [WAL archive](backup.md#wal-archive) -in CloudNativePG. +Write-Ahead Log (WAL) archiving in CloudNativePG is the process of continuously +shipping WAL files to a designated object store from the PostgreSQL primary. +These archives are essential for enabling Point-In-Time Recovery (PITR) and are +a foundational component for both object store and volume snapshot-based backup +strategies. -!!! Important - CloudNativePG currently only supports WAL archives on object stores. Such - WAL archives serve for both object store backups and volume snapshot backups. +## Plugin-Based Architecture -The WAL archive is defined in the `.spec.backup.barmanObjectStore` stanza of -a `Cluster` resource. Please proceed with the same instructions you find in -the ["Backup on object stores" section](backup_barmanobjectstore.md) to set up -the WAL archive. 
+CloudNativePG supports WAL archiving through a **plugin-based mechanism**, +defined via the [`spec.pluginConfiguration`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ClusterSpec) +section of the `Cluster` resource. -!!! Info - Please refer to [`BarmanObjectStoreConfiguration`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BarmanObjectStoreConfiguration) - in the API reference for a full list of options. +Only **one plugin at a time** can be responsible for WAL archiving. This is +configured by setting the `isWALArchiver` field to `true` within the plugin +configuration. -If required, you can choose to compress WAL files as soon as they -are uploaded and/or encrypt them: +## Supported Plugins -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - [...] - wal: - compression: gzip - encryption: AES256 -``` +Currently, the **Barman Cloud Plugin** is the only officially supported WAL +archiving plugin maintained by the CloudNativePG Community. +For full documentation, configuration options, and best practices, see the +[Barman Cloud Plugin documentation](https://cloudnative-pg.io/plugin-barman-cloud/docs/intro/). -You can configure the encryption directly in your bucket, and the operator -will use it unless you override it in the cluster configuration. +## Deprecation Notice: Native Barman Cloud -PostgreSQL implements a sequential archiving scheme, where the -`archive_command` will be executed sequentially for every WAL -segment to be archived. +CloudNativePG still supports WAL archiving natively through the +`.spec.backup.barmanObjectStore` field. While still functional, **this +interface is deprecated** and will be removed in a future release. !!! Important - By default, CloudNativePG sets `archive_timeout` to `5min`, ensuring - that WAL files, even in case of low workloads, are closed and archived - at least every 5 minutes, providing a deterministic time-based value for - your Recovery Point Objective (RPO). Even though you change the value - of the [`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT), - our experience suggests that the default value set by the operator is - suitable for most use cases. + All new deployments are strongly encouraged to adopt the plugin-based + architecture, which offers a more flexible and maintainable approach. -When the bandwidth between the PostgreSQL instance and the object -store allows archiving more than one WAL file in parallel, you -can use the parallel WAL archiving feature of the instance manager -like in the following example: +If you are currently using the native `.spec.backup.barmanObjectStore` +approach, refer to the official guide for a smooth transition: +[Migrating from Built-in CloudNativePG Backup](https://cloudnative-pg.io/plugin-barman-cloud/docs/migration/). -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - [...] - wal: - compression: gzip - maxParallel: 8 - encryption: AES256 -``` +## About the archive timeout -In the previous example, the instance manager optimizes the WAL -archiving process by archiving in parallel at most eight ready -WALs, including the one requested by PostgreSQL. 
+By default, CloudNativePG sets `archive_timeout` to `5min`, ensuring +that WAL files, even in case of low workloads, are closed and archived +at least every 5 minutes, providing a deterministic time-based value for +your Recovery Point Objective ([RPO](before_you_start.md#rpo)). -When PostgreSQL will request the archiving of a WAL that has -already been archived by the instance manager as an optimization, -that archival request will be just dismissed with a positive status. +Even though you change the value of the +[`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT), +our experience suggests that the default value set by the operator is suitable +for most use cases. diff --git a/go.mod b/go.mod index 1541ca8dc3..9370554cd7 100644 --- a/go.mod +++ b/go.mod @@ -1,63 +1,62 @@ module github.com/cloudnative-pg/cloudnative-pg -go 1.22.0 - -toolchain go1.23.1 +go 1.24.1 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/Masterminds/semver/v3 v3.2.1 - github.com/avast/retry-go/v4 v4.6.0 + github.com/Masterminds/semver/v3 v3.4.0 + github.com/avast/retry-go/v4 v4.6.1 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a - github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb - github.com/cloudnative-pg/machinery v0.0.0-20240919131343-9dd62b9257c7 + github.com/cloudnative-pg/barman-cloud v0.3.3 + github.com/cloudnative-pg/cnpg-i v0.3.0 + github.com/cloudnative-pg/machinery v0.3.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/evanphx/json-patch/v5 v5.9.0 - github.com/go-logr/logr v1.4.2 + github.com/evanphx/json-patch/v5 v5.9.11 + github.com/go-logr/logr v1.4.3 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 - github.com/jackc/pgx/v5 v5.6.0 - github.com/jackc/puddle/v2 v2.2.1 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 + github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.20.2 - github.com/onsi/gomega v1.34.2 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 - github.com/prometheus/client_golang v1.20.3 + github.com/onsi/ginkgo/v2 v2.25.2 + github.com/onsi/gomega v1.38.2 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.85.0 + github.com/prometheus/client_golang v1.23.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 - github.com/spf13/cobra v1.8.1 - github.com/stern/stern v1.30.0 + github.com/spf13/cobra v1.10.1 + github.com/stern/stern v1.32.0 github.com/thoas/go-funk v0.9.3 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 - golang.org/x/term v0.24.0 - google.golang.org/grpc v1.65.0 + go.uber.org/zap v1.27.0 + golang.org/x/term v0.34.0 + google.golang.org/grpc v1.75.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.30.3 - k8s.io/apiextensions-apiserver v0.30.3 - k8s.io/apimachinery v0.30.3 - k8s.io/cli-runtime v0.30.3 - k8s.io/client-go v0.30.3 - k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 - sigs.k8s.io/controller-runtime v0.18.4 - sigs.k8s.io/yaml 
v1.4.0 + k8s.io/api v0.34.0 + k8s.io/apiextensions-apiserver v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/cli-runtime v0.34.0 + k8s.io/client-go v0.34.0 + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d + sigs.k8s.io/controller-runtime v0.22.0 + sigs.k8s.io/yaml v1.6.0 ) require ( - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.1 // indirect - github.com/evanphx/json-patch v5.9.0+incompatible // indirect - github.com/fatih/color v1.16.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -65,62 +64,59 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.5.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.59.1 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/spf13/pflag v1.0.9 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.starlark.net v0.0.0-20240411212711-9b43f0afd521 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.27.0 // indirect - golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.25.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.36.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/protobuf v1.36.7 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.17.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/go.sum b/go.sum index 7d2722b6f1..5c818d1472 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,13 @@ -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/Masterminds/semver/v3 v3.2.1 
h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= -github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= +github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= +github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -18,33 +18,37 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1ML9Eibfq3helbT9GtU0EstqFtG91k/MPO9azY5ME= -github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= -github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb h1:kZQk+KUCTHQMEgcH8j2/ypcG2HY58zKocmVUvX6c1IA= -github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb/go.mod h1:UILpBDaWvXcYC5kY5DMaVEEQY5483CBApMuHIn0GJdg= -github.com/cloudnative-pg/machinery v0.0.0-20240919131343-9dd62b9257c7 h1:glRSFwMeX1tb1wlN6ZxihPH3nMXL9ZlwU1/xvNFB0iE= -github.com/cloudnative-pg/machinery v0.0.0-20240919131343-9dd62b9257c7/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cloudnative-pg/barman-cloud v0.3.3 h1:EEcjeV+IUivDpmyF/H/XGY1pGaKJ5LS5MYeB6wgGcak= +github.com/cloudnative-pg/barman-cloud v0.3.3/go.mod h1:5CM4MncAxAjnqxjDt0I5E/oVd7gsMLL0/o/wQ+vUSgs= +github.com/cloudnative-pg/cnpg-i v0.3.0 h1:5ayNOG5x68lU70IVbHDZQrv5p+bErCJ0mqRmOpW2jjE= +github.com/cloudnative-pg/cnpg-i v0.3.0/go.mod h1:VOIWWXcJ1RyioK+elR2DGOa4cBA6K+6UQgx05aZmH+g= +github.com/cloudnative-pg/machinery v0.3.1 h1:KtPA6EwELTUNisCMLiFYkK83GU9606rkGQhDJGPB8Yw= +github.com/cloudnative-pg/machinery v0.3.1/go.mod h1:jebuqKxZAbrRKDEEpVCIDMKW+FbWtB9Kf/hb2kMUu9o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= -github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= -github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= -github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= @@ -57,45 +61,39 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 h1:c5FlPPgxOn7kJz3VoPLkQYQXGBS3EklQ4Zfi57uOuqQ= -github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod 
h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= -github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -105,14 +103,14 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw= -github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 h1:Q3jQ1NkFqv5o+F8dMmHd8SfEmlcwNeo1immFApntEwE= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0/go.mod 
h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= @@ -121,146 +119,164 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA= github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= -github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= -github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= -github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 h1:6UsAv+jAevuGO2yZFU/BukV4o9NKnFMOuoouSA4G0ns= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.85.0 h1:oY+F5FZFmCjCyzkHWPjVQpzvnvEB/0FP+iyzDUUlqFc= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.85.0/go.mod h1:VB7wtBmDT6W2RJHzsvPZlBId+EnmeQA0d33fFTXvraM= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= 
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU= github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stern/stern v1.30.0 h1:4drczNgYqiVZlZ1rTMWgskokq2Owj+Wb1oYOrgM2TQI= -github.com/stern/stern v1.30.0/go.mod h1:l4c94jBK8YEyroFNTCEwFimLK55bdRKPaeyAFWZPvNQ= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stern/stern v1.32.0 h1:xNw0CizB7/4CkWpI46cAo8tArDnS14eYKLaaDevEnrM= +github.com/stern/stern v1.32.0/go.mod h1:Nv6yoHcb2E1HvklagJyd4rjoysJM4WxvcGVQtE651Xw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.starlark.net v0.0.0-20240411212711-9b43f0afd521 h1:1Ufp2S2fPpj0RHIQ4rbzpCdPLCPkzdK7BaVFH3nkYBQ= -go.starlark.net v0.0.0-20240411212711-9b43f0afd521/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= 
-golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -269,37 +285,36 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWM gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= -k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= -k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= -k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= -k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= -k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/cli-runtime v0.30.3 h1:aG69oRzJuP2Q4o8dm+f5WJIX4ZBEwrvdID0+MXyUY6k= -k8s.io/cli-runtime v0.30.3/go.mod h1:hwrrRdd9P84CXSKzhHxrOivAR9BRnkMt0OeP5mj7X30= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= -k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= -k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= -k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd 
h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.17.1 h1:MYJBOP/yQ3/5tp4/sf6HiiMfNNyO97LmtnirH9SLNr4= -sigs.k8s.io/kustomize/api v0.17.1/go.mod h1:ffn5491s2EiNrJSmgqcWGzQUVhc/pB0OKNI0HsT/0tA= -sigs.k8s.io/kustomize/kyaml v0.17.0 h1:G2bWs03V9Ur2PinHLzTUJ8Ded+30SzXZKiO92SRDs3c= -sigs.k8s.io/kustomize/kyaml v0.17.0/go.mod h1:6lxkYF1Cv9Ic8g/N7I86cvxNc5iinUo/P2vKsHNmpyE= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.0 h1:mTOfibb8Hxwpx3xEkR56i7xSjB+nH4hZG37SrlCY5e0= +sigs.k8s.io/controller-runtime v0.22.0/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 7e5acd9fee..5086ae6d74 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,4 +13,6 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ diff --git a/hack/check-pr-in-release-branches.sh b/hack/check-pr-in-release-branches.sh new file mode 100755 index 0000000000..25d9e57add --- /dev/null +++ b/hack/check-pr-in-release-branches.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# +# check-pr-in-release-branches.sh +# +# Example: ./hack/check-pr-in-release-branches.sh '#7988' + +set -euo pipefail + +if [ $# -lt 1 ]; then + echo "Usage: $0 <search-string>" + exit 1 +fi + +search_string="$1" +shift + +branches="main $(git for-each-ref --format '%(refname)' 'refs/heads/release*' | sed -e 's@refs/heads/@@' | sort -rV)" +found=0 +for branch in $branches; do + echo "Checking branch: $branch" + if git log "origin/$branch" --grep="$search_string" -i --oneline | grep -q .; then + echo "✅ Found \"$search_string\" in commits on branch: $branch" + found=1 + else + echo "❌ \"$search_string\" not found in commits on branch: $branch" + fi +done + +if [ $found -eq 0 ]; then + echo "String \"$search_string\" not found in any specified branches." + exit 1 +fi diff --git a/hack/e2e/audit-policy.yaml b/hack/e2e/audit-policy.yaml new file mode 100644 index 0000000000..780380f59c --- /dev/null +++ b/hack/e2e/audit-policy.yaml @@ -0,0 +1,6 @@ +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +- level: RequestResponse + resources: + - group: "postgresql.cnpg.io" diff --git a/hack/e2e/eks-cluster.yaml.template b/hack/e2e/eks-cluster.yaml.template index cf9ca18014..6d91f47958 100644 --- a/hack/e2e/eks-cluster.yaml.template +++ b/hack/e2e/eks-cluster.yaml.template @@ -11,7 +11,7 @@ iam: managedNodeGroups: - name: default - instanceType: m5.large + instanceType: c6a.xlarge desiredCapacity: 3 addons: diff --git a/hack/e2e/env_override_customized.yaml.template b/hack/e2e/env_override_customized.yaml.template index 3cdae2a68e..dad8b70c1c 100644 --- a/hack/e2e/env_override_customized.yaml.template +++ b/hack/e2e/env_override_customized.yaml.template @@ -12,6 +12,9 @@ spec: envFrom: - configMapRef: name: controller-manager-env + env: + - name: STANDBY_TCP_USER_TIMEOUT + value: "5000" args: - controller - --leader-elect=${LEADER_ELECTION} diff --git a/hack/e2e/run-e2e-k3d.sh b/hack/e2e/run-e2e-k3d.sh deleted file mode 100755 index f01b50ecf8..0000000000 --- a/hack/e2e/run-e2e-k3d.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env bash - -## -## Copyright The CloudNativePG Contributors -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License.
-## -# shellcheck disable=SC2317 -# standard bash error handling -set -eEuo pipefail - -if [ "${DEBUG-}" = true ]; then - set -x -fi - -ROOT_DIR=$(realpath "$(dirname "$0")/../../") -HACK_DIR="${ROOT_DIR}/hack" -E2E_DIR="${HACK_DIR}/e2e" - -export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} -export BUILD_IMAGE=${BUILD_IMAGE:-false} -K3D_NODE_DEFAULT_VERSION=v1.30.3 -export K8S_VERSION=${K8S_VERSION:-$K3D_NODE_DEFAULT_VERSION} -export CLUSTER_ENGINE=k3d -export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} -export LOG_DIR=${LOG_DIR:-$ROOT_DIR/_logs/} - -export POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")} -export E2E_PRE_ROLLING_UPDATE_IMG=${E2E_PRE_ROLLING_UPDATE_IMG:-${POSTGRES_IMG%.*}} -export E2E_DEFAULT_STORAGE_CLASS=${E2E_DEFAULT_STORAGE_CLASS:-local-path} -export E2E_CSI_STORAGE_CLASS=${E2E_CSI_STORAGE_CLASS:-csi-hostpath-sc} -export E2E_DEFAULT_VOLUMESNAPSHOT_CLASS=${E2E_DEFAULT_VOLUMESNAPSHOT_CLASS:-csi-hostpath-snapclass} - -export DOCKER_REGISTRY_MIRROR=${DOCKER_REGISTRY_MIRROR:-} -export TEST_CLOUD_VENDOR="local" - -cleanup() { - if [ "${PRESERVE_CLUSTER}" = false ]; then - "${HACK_DIR}/setup-cluster.sh" destroy || true - else - set +x - echo "You've chosen to preserve the Kubernetes cluster." - echo "You can delete it manually later running:" - echo "'${HACK_DIR}/setup-cluster.sh' destroy" - fi -} - -main() { - # Call to setup-cluster.sh script - "${HACK_DIR}/setup-cluster.sh" -r create - - trap cleanup EXIT - - # In case image building is forced it will use a default - # controller image name: cloudnative-pg:e2e. - # Otherwise it will download the image from docker - # registry using below credentials. - if [ "${BUILD_IMAGE}" == false ]; then - # Prevent e2e tests to proceed with empty tag which - # will be considered as "latest". - # This will fail in case heuristic IMAGE_TAG will - # be empty, and will continue if CONTROLLER_IMG - # is manually specified during execution, i.e.: - # - # BUILD_IMAGE=false CONTROLLER_IMG=cloudnative-pg:e2e ./hack/e2e/run-e2e-k3d.sh - # - if [ -z "${CONTROLLER_IMG:-}" ]; then - IMAGE_TAG="$( (git symbolic-ref -q --short HEAD || git describe --tags --exact-match) | tr / -)" - export CONTROLLER_IMG="ghcr.io/cloudnative-pg/cloudnative-pg-testing:${IMAGE_TAG}" - fi - else - unset CONTROLLER_IMG - "${HACK_DIR}/setup-cluster.sh" load - fi - - "${HACK_DIR}/setup-cluster.sh" load-helper-images - - RC=0 - - # Run E2E tests - "${E2E_DIR}/run-e2e.sh" || RC=$? - - ## Export logs - "${HACK_DIR}/setup-cluster.sh" export-logs - - exit $RC -} - -main diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 3f77232917..b4601e8391 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash ## -## Copyright The CloudNativePG Contributors +## Copyright © contributors to CloudNativePG, established as +## CloudNativePG a Series of LF Projects, LLC. ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. @@ -15,6 +16,9 @@ ## See the License for the specific language governing permissions and ## limitations under the License. 
## +## SPDX-License-Identifier: Apache-2.0 +## + # shellcheck disable=SC2317 # standard bash error handling set -eEuo pipefail @@ -29,21 +33,25 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.31.0 +KIND_NODE_DEFAULT_VERSION=v1.34.0 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} export LOG_DIR=${LOG_DIR:-$ROOT_DIR/_logs/} +export ENABLE_APISERVER_AUDIT=${ENABLE_APISERVER_AUDIT:-false} export POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")} export E2E_PRE_ROLLING_UPDATE_IMG=${E2E_PRE_ROLLING_UPDATE_IMG:-${POSTGRES_IMG%.*}} export E2E_DEFAULT_STORAGE_CLASS=${E2E_DEFAULT_STORAGE_CLASS:-standard} export E2E_CSI_STORAGE_CLASS=${E2E_CSI_STORAGE_CLASS:-csi-hostpath-sc} export E2E_DEFAULT_VOLUMESNAPSHOT_CLASS=${E2E_DEFAULT_VOLUMESNAPSHOT_CLASS:-csi-hostpath-snapclass} +export CONTROLLER_IMG_DIGEST=${CONTROLLER_IMG_DIGEST:-""} +export CONTROLLER_IMG_PRIME_DIGEST=${CONTROLLER_IMG_PRIME_DIGEST:-""} export DOCKER_REGISTRY_MIRROR=${DOCKER_REGISTRY_MIRROR:-} export TEST_CLOUD_VENDOR="local" +# shellcheck disable=SC2329 cleanup() { if [ "${PRESERVE_CLUSTER}" = false ]; then "${HACK_DIR}/setup-cluster.sh" destroy || true @@ -57,7 +65,7 @@ cleanup() { main() { # Call to setup-cluster.sh script - "${HACK_DIR}/setup-cluster.sh" -r create + "${HACK_DIR}/setup-cluster.sh" create trap cleanup EXIT @@ -83,7 +91,8 @@ main() { "${HACK_DIR}/setup-cluster.sh" load fi - "${HACK_DIR}/setup-cluster.sh" load-helper-images + # Commented out until a new kindest/node release newer than v1.32.1 is available + # "${HACK_DIR}/setup-cluster.sh" load-helper-images RC=0 diff --git a/hack/e2e/run-e2e-local.sh b/hack/e2e/run-e2e-local.sh index 56a3d78c6c..aa404f4f53 100755 --- a/hack/e2e/run-e2e-local.sh +++ b/hack/e2e/run-e2e-local.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash ## -## Copyright The CloudNativePG Contributors +## Copyright © contributors to CloudNativePG, established as +## CloudNativePG a Series of LF Projects, LLC. ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. @@ -15,6 +16,8 @@ ## See the License for the specific language governing permissions and ## limitations under the License. ## +## SPDX-License-Identifier: Apache-2.0 +## # standard bash error handling set -eEuo pipefail diff --git a/hack/e2e/run-e2e-ocp.sh b/hack/e2e/run-e2e-ocp.sh index d6a8a0d814..ec2f90db71 100755 --- a/hack/e2e/run-e2e-ocp.sh +++ b/hack/e2e/run-e2e-ocp.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash ## -## Copyright The CloudNativePG Contributors +## Copyright © contributors to CloudNativePG, established as +## CloudNativePG a Series of LF Projects, LLC. ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. @@ -15,6 +16,8 @@ ## See the License for the specific language governing permissions and ## limitations under the License. ## +## SPDX-License-Identifier: Apache-2.0 +## set -eEuo pipefail @@ -36,6 +39,40 @@ function wait_for() { [[ $ITER -lt $5 ]] } +# Retry a command up to a specific number of times until it exits successfully, +# with exponential backoff. +# +# $ retry 5 echo Hello +# Hello +# +# $ retry 5 false +# Retry 1/5 exited 1, retrying in 1 seconds... +# Retry 2/5 exited 1, retrying in 2 seconds...
+# Retry 3/5 exited 1, retrying in 4 seconds...
+# Retry 4/5 exited 1, retrying in 8 seconds...
+# Retry 5/5 exited 1, no more retries left.
+#
+# Inspired by https://gist.github.com/sj26/88e1c6584397bb7c13bd11108a579746
+function retry {
+  local retries=$1
+  shift
+
+  local count=0
+  until "$@"; do
+    local exit=$?
+    local wait=$((2 ** count))
+    count=$((count + 1))
+    if [ $count -lt "$retries" ]; then
+      echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." >&2
+      sleep $wait
+    else
+      echo "Retry $count/$retries exited $exit, no more retries left." >&2
+      return $exit
+    fi
+  done
+  return 0
+}
+
 ROOT_DIR=$(realpath "$(dirname "$0")/../../")
 # we need to export ENVs defined in the workflow and used in run-e2e.sh script
 export POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")}
@@ -50,6 +87,9 @@ oc apply -f cloudnative-pg-catalog.yaml
 # create the secret for the index to be pulled in the marketplace
 oc create secret docker-registry -n openshift-marketplace --docker-server="${REGISTRY}" --docker-username="${REGISTRY_USER}" --docker-password="${REGISTRY_PASSWORD}" cnpg-pull-secret || true
 
+# Create the default ConfigMap to set global keepalives for all the tests
+oc create configmap -n openshift-operators --from-literal=STANDBY_TCP_USER_TIMEOUT=5000 cnpg-controller-manager-config
+
 # Install the operator
 oc apply -f - <>"${config_file}" <<-EOF
+    kubeadmConfigPatches:
+    - |
+      kind: ClusterConfiguration
+      apiServer:
+        # enable auditing flags on the API server
+        extraArgs:
+          audit-log-path: /var/log/kubernetes/kube-apiserver-audit.log
+          audit-policy-file: /etc/kubernetes/policies/audit-policy.yaml
+        # mount new files / directories on the control plane
+        extraVolumes:
+          - name: audit-policies
+            hostPath: /etc/kubernetes/policies
+            mountPath: /etc/kubernetes/policies
+            readOnly: true
+            pathType: "DirectoryOrCreate"
+          - name: "audit-logs"
+            hostPath: "/var/log/kubernetes"
+            mountPath: "/var/log/kubernetes"
+            readOnly: false
+            pathType: DirectoryOrCreate
+    # mount the local file on the control plane
+    extraMounts:
+    - hostPath: ${E2E_DIR}/audit-policy.yaml
+      containerPath: /etc/kubernetes/policies/audit-policy.yaml
+      readOnly: true
+    - hostPath: ${LOG_DIR}/apiserver/
+      containerPath: /var/log/kubernetes/
+EOF
+  fi
 
   if [ "$NODES" -gt 1 ]; then
     for ((i = 0; i < NODES; i++)); do
@@ -153,35 +174,39 @@ EOF
     done
   fi
 
-  if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ] || [ -n "${ENABLE_REGISTRY:-}" ]; then
-    # Add containerdConfigPatches section
+  # Enable ImageVolume support starting from kindest/node v1.33.1
+  if [[ "$(printf '%s\n' "1.33.1" "${k8s_version#v}" | sort -V | head -n1)" == "1.33.1" ]]; then
     cat >>"${config_file}" <<-EOF
+featureGates:
+  ImageVolume: true
+EOF
+  fi
+
+  # Add containerdConfigPatches section
+  cat >>"${config_file}" <<-EOF
+containerdConfigPatches:
 EOF
 
-  if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ]; then
-    cat >>"${config_file}" <<-EOF
+  if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ]; then
+    cat >>"${config_file}" <<-EOF
 - |-
   [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
     endpoint = ["${DOCKER_REGISTRY_MIRROR}"]
 EOF
-  fi
+  fi
 
-  if [ -n "${ENABLE_REGISTRY:-}" ]; then
-    cat >>"${config_file}" <<-EOF
+  cat >>"${config_file}" <<-EOF
 - |-
   [plugins."io.containerd.grpc.v1.cri".registry.mirrors."${registry_name}:5000"]
     endpoint = ["http://${registry_name}:5000"]
 EOF
-  fi
-  fi
+
   # Create the cluster
   kind create cluster --name "${cluster_name}" --image "kindest/node:${k8s_version}" --config "${config_file}"
 
-  if [ -n "${ENABLE_REGISTRY:-}" ]; then
"${ENABLE_REGISTRY:-}" ]; then - docker network connect "kind" "${registry_name}" &>/dev/null || true - fi + docker network connect "kind" "${registry_name}" &>/dev/null || true # Workaround for https://kind.sigs.k8s.io/docs/user/known-issues/#pod-errors-due-to-too-many-open-files for node in $(kind get nodes --name "${cluster_name}"); do @@ -201,128 +226,35 @@ destroy_kind() { docker network rm "kind" &>/dev/null || true } -check_registry_kind() { - [ -n "$(check_registry "kind")" ] -} - -## -## K3D SUPPORT -## - -install_k3d() { - local bindir=$1 - - curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | K3D_INSTALL_DIR=$bindir bash -s -- --no-sudo -} - -create_cluster_k3d() { - local k8s_version=$1 - local cluster_name=$2 - - local latest_k3s_tag - latest_k3s_tag=$(k3d version list k3s | grep -- "^${k8s_version//./\\.}"'\+-k3s[0-9]$' | tail -n 1) - - local options=() - if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ] || [ -n "${ENABLE_REGISTRY:-}" ]; then - config_file="${TEMP_DIR}/k3d-registries.yaml" - cat >"${config_file}" <<-EOF -mirrors: -EOF - - if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ]; then - cat >>"${config_file}" <<-EOF - "docker.io": - endpoint: - - "${DOCKER_REGISTRY_MIRROR}" -EOF - fi - - if [ -n "${ENABLE_REGISTRY:-}" ]; then - cat >>"${config_file}" <<-EOF - "${registry_name}:5000": - endpoint: - - http://${registry_name}:5000 -EOF - fi - - options+=(--registry-config "${config_file}") - fi - - local agents=() - if [ "$NODES" -gt 1 ]; then - agents=(-a "${NODES}") - fi - - K3D_FIX_MOUNTS=1 k3d cluster create "${options[@]}" "${agents[@]}" -i "rancher/k3s:${latest_k3s_tag}" --no-lb "${cluster_name}" \ - --k3s-arg "--disable=traefik@server:0" --k3s-arg "--disable=metrics-server@server:0" \ - --k3s-arg "--node-taint=node-role.kubernetes.io/master:NoSchedule@server:0" #wokeignore:rule=master - - if [ -n "${ENABLE_REGISTRY:-}" ]; then - docker network connect "k3d-${cluster_name}" "${registry_name}" &>/dev/null || true - fi -} - -load_image_k3d() { - local cluster_name=$1 - local image=$2 - k3d image import "${image}" -c "${cluster_name}" -} - -export_logs_k3d() { - local cluster_name=$1 - while IFS= read -r line; do - NODES_LIST+=("$line") - done < <(k3d node list | awk "/${cluster_name}/{print \$1}") - for i in "${NODES_LIST[@]}"; do - mkdir -p "${LOG_DIR}/${i}" - docker cp -L "${i}:/var/log/." "${LOG_DIR}/${i}" - done -} - -destroy_k3d() { - local cluster_name=$1 - docker network disconnect "k3d-${cluster_name}" "${registry_name}" &>/dev/null || true - k3d cluster delete "${cluster_name}" || true - docker network rm "k3d-${cluster_name}" &>/dev/null || true -} - -check_registry_k3d() { - [ -n "$(check_registry "k3d-${CLUSTER_NAME}")" ] -} - ## ## GENERIC ROUTINES ## -install_kubectl() { - local bindir=$1 - - local binary="${bindir}/kubectl" - - curl -sL "https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION#v}/bin/${OS}/${ARCH}/kubectl" -o "${binary}" - chmod +x "${binary}" -} - # The following function makes sure we already have a Docker container # with a bound volume to act as local registry. This is really needed # to have an easy way to refresh the operator version that is running # on the temporary cluster. ensure_registry() { - [ -z "${ENABLE_REGISTRY:-}" ] && return - if ! docker volume inspect "${registry_volume}" &>/dev/null; then docker volume create "${registry_volume}" fi + if ! docker network inspect "${registry_net}" &>/dev/null; then + docker network create "${registry_net}" + fi + if ! 
docker inspect "${registry_name}" &>/dev/null; then - docker container run -d --name "${registry_name}" -v "${registry_volume}:/var/lib/registry" --restart always -p 5000:5000 registry:2 + docker container run -d --name "${registry_name}" --network "${registry_net}" -v "${registry_volume}:/var/lib/registry" --restart always -p 5000:5000 registry:2 fi } -check_registry() { - local network=$1 - docker network inspect "${network}" | \ - jq -r ".[].Containers | .[] | select(.Name==\"${registry_name}\") | .Name" +# An existing builder will not have any knowledge of the local registry or the +# any host outside the builder, but when having the builder inside Kubernetes +# this is fixed since we already solved the issue of the kubernetes cluster reaching +# out the local registry. The following functions will handle that builder +create_builder() { + docker buildx rm "${builder_name}" &>/dev/null || true + docker buildx create --name "${builder_name}" --driver-opt "network=${registry_net}" } deploy_fluentd() { @@ -377,11 +309,18 @@ deploy_csi_host_path() { kubectl apply -f "${CSI_BASE_URL}"/external-resizer/"${EXTERNAL_RESIZER_VERSION}"/deploy/kubernetes/rbac.yaml ## Install driver and plugin - kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.27/hostpath/csi-hostpath-driverinfo.yaml - kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.27/hostpath/csi-hostpath-plugin.yaml + ## Create a temporary file for the modified plugin deployment. This is needed + ## because csi-driver-host-path plugin yaml tends to lag behind a few versions. + plugin_file="${TEMP_DIR}/csi-hostpath-plugin.yaml" + curl -sSL "${CSI_BASE_URL}/csi-driver-host-path/${CSI_DRIVER_HOST_PATH_VERSION}/deploy/kubernetes-1.30/hostpath/csi-hostpath-plugin.yaml" | + sed "s|registry.k8s.io/sig-storage/hostpathplugin:.*|registry.k8s.io/sig-storage/hostpathplugin:${CSI_DRIVER_HOST_PATH_VERSION}|g" > "${plugin_file}" + + kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.30/hostpath/csi-hostpath-driverinfo.yaml + kubectl apply -f "${plugin_file}" + rm "${plugin_file}" ## create volumesnapshotclass - kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.27/hostpath/csi-hostpath-snapshotclass.yaml + kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.30/hostpath/csi-hostpath-snapshotclass.yaml ## Prevent VolumeSnapshot E2e test to fail when taking a ## snapshot of a running PostgreSQL instance @@ -467,17 +406,13 @@ load_image_registry() { local image_local_name=${image/${registry_name}/127.0.0.1} docker tag "${image}" "${image_local_name}" - docker push -q "${image_local_name}" + docker push --platform "${DOCKER_DEFAULT_PLATFORM}" -q "${image_local_name}" } load_image() { local cluster_name=$1 local image=$2 - if [ -z "${ENABLE_REGISTRY:-}" ]; then - "load_image_${ENGINE}" "${cluster_name}" "${image}" - else - load_image_registry "${image}" - fi + load_image_registry "${image}" } deploy_operator() { @@ -488,11 +423,10 @@ deploy_operator() { usage() { cat >&2 <] [-r] +Usage: $0 [-k ] [-r] Commands: - prepare Downloads the prerequisite into - create Create the test cluster + create Create the test cluster and a local registry load Build and load the operator image in the cluster load-helper-images Load the catalog of HELPER_IMGS into the local registry deploy 
Deploy the operator manifests in the cluster @@ -504,11 +438,6 @@ Commands: pyroscope Deploy Pyroscope inside operator namespace Options: - -e|--engine - Use the provided ENGINE to run the cluster. - Available options are 'kind' and 'k3d'. Default 'kind'. - Env: CLUSTER_ENGINE - -k|--k8s-version Use the specified kubernetes full version number (e.g., v1.27.0). Env: K8S_VERSION @@ -518,8 +447,6 @@ Options: Used only during "create" command. Default: 3 Env: NODES - -r|--registry Enable local registry. Env: ENABLE_REGISTRY - To use long options you need to have GNU enhanced getopt available, otherwise you can only use the short version of the options. EOF @@ -530,18 +457,10 @@ EOF ## COMMANDS ## -prepare() { - local bindir=$1 - echo "${bright}Installing cluster prerequisites in ${bindir}${reset}" - install_kubectl "${bindir}" - "install_${ENGINE}" "${bindir}" - echo "${bright}Done installing cluster prerequisites in ${bindir}${reset}" -} - create() { - echo "${bright}Creating ${ENGINE} cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" + echo "${bright}Creating kind cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" - "create_cluster_${ENGINE}" "${K8S_VERSION}" "${CLUSTER_NAME}" + "create_cluster_kind" "${K8S_VERSION}" "${CLUSTER_NAME}" # Support for docker:dind service if [ "${DOCKER_HOST:-}" == "tcp://docker:2376" ]; then @@ -552,7 +471,7 @@ create() { deploy_csi_host_path deploy_prometheus_crds - echo "${bright}Done creating ${ENGINE} cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" + echo "${bright}Done creating kind cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" } load_helper_images() { @@ -562,7 +481,7 @@ load_helper_images() { # with the goal to speed up the runs. for IMG in "${HELPER_IMGS[@]}"; do docker pull "${IMG}" - "load_image_${ENGINE}" "${CLUSTER_NAME}" "${IMG}" + "load_image_kind" "${CLUSTER_NAME}" "${IMG}" done echo "${bright}Done loading helper images on cluster ${CLUSTER_NAME}${reset}" @@ -576,19 +495,17 @@ load() { # This code will NEVER run in the cloud CI/CD workflows, as there we do # the build and push (into GH test registry) once in `builds`, before # the strategy matrix blows up the number of executables - if [ -z "${ENABLE_REGISTRY}" ] && "check_registry_${ENGINE}"; then - ENABLE_REGISTRY=true - fi + + create_builder echo "${bright}Building operator from current worktree${reset}" - CONTROLLER_IMG="$(ENABLE_REGISTRY="${ENABLE_REGISTRY}" print_image)" - make -C "${ROOT_DIR}" CONTROLLER_IMG="${CONTROLLER_IMG}" ARCH="${ARCH}" docker-build + CONTROLLER_IMG="$(print_image)" + make -C "${ROOT_DIR}" CONTROLLER_IMG="${CONTROLLER_IMG}" insecure="true" \ + ARCH="${ARCH}" BUILDER_NAME=${builder_name} docker-build echo "${bright}Loading new operator image on cluster ${CLUSTER_NAME}${reset}" - load_image "${CLUSTER_NAME}" "${CONTROLLER_IMG}" - echo "${bright}Done loading new operator image on cluster ${CLUSTER_NAME}${reset}" if [[ "${TEST_UPGRADE_TO_V1}" != "false" ]]; then @@ -602,21 +519,17 @@ load() { PRIME_CONTROLLER_IMG="${CONTROLLER_IMG}-prime" CURRENT_VERSION=$(make -C "${ROOT_DIR}" -s print-version) PRIME_VERSION="${CURRENT_VERSION}-prime" - make -C "${ROOT_DIR}" CONTROLLER_IMG="${PRIME_CONTROLLER_IMG}" VERSION="${PRIME_VERSION}" \ - ARCH="${ARCH}" docker-build - - load_image "${CLUSTER_NAME}" "${PRIME_CONTROLLER_IMG}" + make -C "${ROOT_DIR}" CONTROLLER_IMG="${PRIME_CONTROLLER_IMG}" VERSION="${PRIME_VERSION}" insecure="true" \ + ARCH="${ARCH}" BUILDER_NAME="${builder_name}" docker-build echo "${bright}Done loading new 'prime' 
operator image on cluster ${CLUSTER_NAME}${reset}" fi + + docker buildx rm "${builder_name}" } deploy() { - if [ -z "${ENABLE_REGISTRY}" ] && "check_registry_${ENGINE}"; then - ENABLE_REGISTRY=true - fi - - CONTROLLER_IMG="$(ENABLE_REGISTRY="${ENABLE_REGISTRY}" print_image)" + CONTROLLER_IMG="$(print_image)" echo "${bright}Deploying manifests from current worktree on cluster ${CLUSTER_NAME}${reset}" @@ -626,27 +539,23 @@ deploy() { } print_image() { - local tag=devel - if [ -n "${ENABLE_REGISTRY:-}" ] || "check_registry_${ENGINE}"; then - tag=latest - fi - echo "${registry_name}:5000/cloudnative-pg:${tag}" + echo "${registry_name}:5000/cloudnative-pg-testing:latest" } export_logs() { echo "${bright}Exporting logs from cluster ${CLUSTER_NAME} to ${LOG_DIR}${reset}" - "export_logs_${ENGINE}" "${CLUSTER_NAME}" + "export_logs_kind" "${CLUSTER_NAME}" echo "${bright}Done exporting logs from cluster ${CLUSTER_NAME} to ${LOG_DIR}${reset}" } destroy() { - echo "${bright}Destroying ${ENGINE} cluster ${CLUSTER_NAME}${reset}" + echo "${bright}Destroying kind cluster ${CLUSTER_NAME}${reset}" - "destroy_${ENGINE}" "${CLUSTER_NAME}" + "destroy_kind" "${CLUSTER_NAME}" - echo "${bright}Done destroying ${ENGINE} cluster ${CLUSTER_NAME}${reset}" + echo "${bright}Done destroying kind cluster ${CLUSTER_NAME}${reset}" } pyroscope() { @@ -672,13 +581,7 @@ main() { case "${o}" in -e | --engine) shift - ENGINE=$1 - shift - if [ "${ENGINE}" != "kind" ] && [ "${ENGINE}" != "k3d" ]; then - echo "ERROR: ${ENGINE} is not a valid engine! [kind, k3d]" >&2 - echo >&2 - usage - fi + # no-op, kept for compatibility ;; -k | --k8s-version) shift @@ -702,7 +605,7 @@ main() { ;; -r | --registry) shift - ENABLE_REGISTRY=true + # no-op, kept for compatibility ;; --) shift @@ -719,18 +622,11 @@ main() { fi if [ -z "${K8S_VERSION}" ]; then - case "${ENGINE}" in - kind) - K8S_VERSION=${KIND_NODE_DEFAULT_VERSION} - ;; - k3d) - K8S_VERSION=${K3D_NODE_DEFAULT_VERSION} - ;; - esac + K8S_VERSION=${KIND_NODE_DEFAULT_VERSION} fi KUBECTL_VERSION=${KUBECTL_VERSION:-$K8S_VERSION} - # Only here the K8S_VERSION veriable contains its final value + # Only here the K8S_VERSION variable contains its final value # so we can set the default cluster name CLUSTER_NAME=${CLUSTER_NAME:-pg-operator-e2e-${K8S_VERSION//./-}} @@ -740,16 +636,6 @@ main() { # Invoke the command case "$command" in - prepare) - if [ "$#" -eq 0 ]; then - echo "ERROR: prepare requires a destination directory" >&2 - echo >&2 - usage - fi - dest_dir=$1 - shift - prepare "${dest_dir}" - ;; create | load | load-helper-images | deploy | print-image | export-logs | destroy | pyroscope) ensure_registry diff --git a/hack/show-release-diffs.sh b/hack/show-release-diffs.sh index cdbacc8701..12a76801a7 100755 --- a/hack/show-release-diffs.sh +++ b/hack/show-release-diffs.sh @@ -1,5 +1,23 @@ #!/usr/bin/env bash ## +## Copyright © contributors to CloudNativePG, established as +## CloudNativePG a Series of LF Projects, LLC. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## +## SPDX-License-Identifier: Apache-2.0 +## + ## CloudNativePG - Show diffs from main for a release branch ## ## This is a helper script that prints the GitHub pull requests @@ -19,21 +37,6 @@ ## This example compares the current branch with 1.15: ## ## ./hack/show-release-diffs.sh release-1.15 -## -## Copyright The CloudNativePG Contributors -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. -## set -o errexit -o nounset -o pipefail diff --git a/internal/cmd/manager/backup/cmd.go b/internal/cmd/manager/backup/cmd.go index 6a5c3ec346..f74bd8ab23 100644 --- a/internal/cmd/manager/backup/cmd.go +++ b/internal/cmd/manager/backup/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package backup implement the "controller backup" command @@ -33,18 +36,19 @@ import ( func NewCmd() *cobra.Command { cmd := cobra.Command{ Use: "backup [backup_name]", - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { + contextLogger := log.FromContext(cmd.Context()) backupURL := url.Local(url.PathPgBackup, url.LocalPort) resp, err := http.Get(backupURL + "?name=" + args[0]) if err != nil { - log.Error(err, "Error while requesting backup") + contextLogger.Error(err, "Error while requesting backup") return err } defer func() { err := resp.Body.Close() if err != nil { - log.Error(err, "Can't close the connection", + contextLogger.Error(err, "Can't close the connection", "backupURL", backupURL, "statusCode", resp.StatusCode, ) @@ -53,15 +57,15 @@ func NewCmd() *cobra.Command { body, err := io.ReadAll(resp.Body) if err != nil { - log.Error(err, "Error while reading backup response body", + contextLogger.Error(err, "Error while reading backup response body", "backupURL", backupURL, "statusCode", resp.StatusCode, ) return err } - if resp.StatusCode != 200 { - log.Info( + if resp.StatusCode != http.StatusOK { + contextLogger.Info( "Error while requesting backup", "backupURL", backupURL, "statusCode", resp.StatusCode, @@ -72,7 +76,7 @@ func NewCmd() *cobra.Command { _, err = os.Stderr.Write(body) if err != nil { - log.Error(err, "Error while starting a backup") + contextLogger.Error(err, "Error while starting a backup") return err } diff --git a/internal/cmd/manager/bootstrap/cmd.go b/internal/cmd/manager/bootstrap/cmd.go index ed87082bf7..fd85706d35 100644 --- a/internal/cmd/manager/bootstrap/cmd.go +++ b/internal/cmd/manager/bootstrap/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © 
contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package bootstrap implement the "controller bootstrap" command @@ -33,9 +36,10 @@ func NewCmd() *cobra.Command { Use: "bootstrap [target]", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { + contextLogger := log.FromContext(cmd.Context()) dest := args[0] - log.Info("Installing the manager executable", + contextLogger.Info("Installing the manager executable", "destination", dest, "version", versions.Version, "build", versions.Info) @@ -44,13 +48,13 @@ func NewCmd() *cobra.Command { panic(err) } - log.Info("Setting 0750 permissions") + contextLogger.Info("Setting 0750 permissions") err = os.Chmod(dest, 0o750) // #nosec if err != nil { panic(err) } - log.Info("Bootstrap completed") + contextLogger.Info("Bootstrap completed") return nil }, diff --git a/internal/cmd/manager/controller/cmd.go b/internal/cmd/manager/controller/cmd.go index 9eb5629868..12c5f988a5 100644 --- a/internal/cmd/manager/controller/cmd.go +++ b/internal/cmd/manager/controller/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -34,6 +37,7 @@ func NewCmd() *cobra.Command { var pprofHTTPServer bool var leaderLeaseDuration int var leaderRenewDeadline int + var maxConcurrentReconciles int cmd := cobra.Command{ Use: "controller [flags]", @@ -50,6 +54,7 @@ func NewCmd() *cobra.Command { }, pprofHTTPServer, port, + maxConcurrentReconciles, configuration.Current, ) }, @@ -77,6 +82,12 @@ func NewCmd() *cobra.Command { false, "If true it will start a pprof debug http server on localhost:6060. Defaults to false.", ) + cmd.Flags().IntVar( + &maxConcurrentReconciles, + "max-concurrent-reconciles", + 10, + "The maximum number of concurrent reconciles. Defaults to 10.", + ) return &cmd } diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index ea66ab17f8..4da98cffdd 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package controller implement the command used to start the operator @@ -19,10 +22,8 @@ package controller import ( "context" - "errors" "fmt" "net/http" - "net/http/pprof" "time" "github.com/cloudnative-pg/machinery/pkg/log" @@ -34,14 +35,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - // +kubebuilder:scaffold:imports - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + webhookv1 "github.com/cloudnative-pg/cloudnative-pg/internal/webhook/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" "github.com/cloudnative-pg/cloudnative-pg/pkg/multicache" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" @@ -98,18 +97,14 @@ func RunController( leaderConfig leaderElectionConfiguration, pprofDebug bool, port int, + maxConcurrentReconciles int, conf *configuration.Data, ) error { ctx := context.Background() - setupLog.Info("Starting CloudNativePG Operator", "version", versions.Version, "build", versions.Info) - if pprofDebug { - startPprofDebugServer(ctx) - } - managerOptions := ctrl.Options{ Scheme: scheme, Metrics: server.Options{ @@ -123,6 +118,7 @@ func RunController( Port: port, CertDir: defaultWebhookCertDir, }), + PprofBindAddress: getPprofServerAddress(pprofDebug), // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily // when the Manager ends. This requires the binary to immediately end when the // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly @@ -219,17 +215,19 @@ func RunController( } pluginRepository := repository.New() - if err := pluginRepository.RegisterUnixSocketPluginsInPath( + if _, err := pluginRepository.RegisterUnixSocketPluginsInPath( conf.PluginSocketDir, ); err != nil { setupLog.Error(err, "Unable to load sidecar CNPG-i plugins, skipping") } + defer pluginRepository.Close() if err = controller.NewClusterReconciler( mgr, discoveryClient, pluginRepository, - ).SetupWithManager(ctx, mgr); err != nil { + conf.DrainTaints, + ).SetupWithManager(ctx, mgr, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Cluster") return err } @@ -243,10 +241,8 @@ func RunController( return err } - if err = controller.NewPluginReconciler( - mgr, - pluginRepository, - ).SetupWithManager(mgr, configuration.Current.OperatorNamespace); err != nil { + if err = controller.NewPluginReconciler(mgr, conf.OperatorNamespace, pluginRepository). 
+ SetupWithManager(mgr, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Plugin") return err } @@ -255,7 +251,7 @@ func RunController( Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("cloudnative-pg-scheduledbackup"), - }).SetupWithManager(ctx, mgr); err != nil { + }).SetupWithManager(ctx, mgr, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ScheduledBackup") return err } @@ -265,31 +261,36 @@ func RunController( DiscoveryClient: discoveryClient, Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("cloudnative-pg-pooler"), - }).SetupWithManager(mgr); err != nil { + }).SetupWithManager(mgr, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Pooler") return err } - if err = (&apiv1.Cluster{}).SetupWebhookWithManager(mgr); err != nil { + if err = webhookv1.SetupClusterWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Cluster", "version", "v1") return err } - if err = (&apiv1.Backup{}).SetupWebhookWithManager(mgr); err != nil { + if err = webhookv1.SetupBackupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Backup", "version", "v1") return err } - if err = (&apiv1.ScheduledBackup{}).SetupWebhookWithManager(mgr); err != nil { + if err = webhookv1.SetupScheduledBackupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "ScheduledBackup", "version", "v1") return err } - if err = (&apiv1.Pooler{}).SetupWebhookWithManager(mgr); err != nil { + if err = webhookv1.SetupPoolerWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Pooler", "version", "v1") return err } + if err = webhookv1.SetupDatabaseWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "Database", "version", "v1") + return err + } + // Setup the handler used by the readiness and liveliness probe. 
// // Unfortunately the readiness of the probe is not sufficient for the operator to be @@ -464,39 +465,10 @@ func readSecret( return data, nil } -// startPprofDebugServer exposes pprof debug server if the pprof-server env variable is set to true -func startPprofDebugServer(ctx context.Context) { - mux := http.NewServeMux() - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - - pprofServer := http.Server{ - Addr: "0.0.0.0:6060", - Handler: mux, - ReadTimeout: webserver.DefaultReadTimeout, - ReadHeaderTimeout: webserver.DefaultReadHeaderTimeout, +func getPprofServerAddress(enabled bool) string { + if enabled { + return "0.0.0.0:6060" } - setupLog.Info("Starting pprof HTTP server", "addr", pprofServer.Addr) - - go func() { - go func() { - <-ctx.Done() - - setupLog.Info("shutting down pprof HTTP server") - ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Second) - defer cancelFunc() - - if err := pprofServer.Shutdown(ctx); err != nil { - setupLog.Error(err, "Failed to shutdown pprof HTTP server") - } - }() - - if err := pprofServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { - setupLog.Error(err, "Failed to start pprof HTTP server") - } - }() + return "" } diff --git a/internal/cmd/manager/debug/architectures/cmd.go b/internal/cmd/manager/debug/architectures/cmd.go index 59ff5fd390..b14c81702a 100644 --- a/internal/cmd/manager/debug/architectures/cmd.go +++ b/internal/cmd/manager/debug/architectures/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package architectures implement the show-architectures command @@ -32,9 +35,10 @@ func NewCmd() *cobra.Command { cmd := cobra.Command{ Use: "show-architectures", Short: "Lists all the CPU architectures supported by this image", - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { + contextLogger := log.FromContext(cmd.Context()) if err := run(); err != nil { - log.Error(err, "Error while extracting the list of supported architectures") + contextLogger.Error(err, "Error while extracting the list of supported architectures") return err } diff --git a/internal/cmd/manager/debug/cmd.go b/internal/cmd/manager/debug/cmd.go index e46fdbc735..23250fdd7b 100644 --- a/internal/cmd/manager/debug/cmd.go +++ b/internal/cmd/manager/debug/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package debug implement the debug command subfeatures diff --git a/internal/cmd/manager/instance/cmd.go b/internal/cmd/manager/instance/cmd.go index 7cf773b66f..62ac1e068f 100644 --- a/internal/cmd/manager/instance/cmd.go +++ b/internal/cmd/manager/instance/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package instance implements the "instance" subcommand of the operator @@ -19,6 +22,7 @@ package instance import ( "fmt" + "os" "github.com/spf13/cobra" @@ -29,6 +33,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/restoresnapshot" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/run" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/status" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/upgrade" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) // NewCmd creates the "instance" command @@ -39,6 +45,9 @@ func NewCmd() *cobra.Command { RunE: func(_ *cobra.Command, _ []string) error { return fmt.Errorf("missing subcommand") }, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + return os.MkdirAll(postgres.TemporaryDirectory, 0o1777) //nolint:gosec + }, } cmd.AddCommand(initdb.NewCmd()) @@ -48,6 +57,7 @@ func NewCmd() *cobra.Command { cmd.AddCommand(pgbasebackup.NewCmd()) cmd.AddCommand(restore.NewCmd()) cmd.AddCommand(restoresnapshot.NewCmd()) + cmd.AddCommand(upgrade.NewCmd()) return cmd } diff --git a/internal/cmd/manager/instance/initdb/cmd.go b/internal/cmd/manager/instance/initdb/cmd.go index bbf59790d2..9406d7b5d5 100644 --- a/internal/cmd/manager/instance/initdb/cmd.go +++ b/internal/cmd/manager/instance/initdb/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package initdb implements the "instance init" subcommand of the operator @@ -60,28 +63,29 @@ func NewCmd() *cobra.Command { }, RunE: func(cmd *cobra.Command, _ []string) error { ctx := cmd.Context() + contextLogger := log.FromContext(ctx) initDBFlags, err := shellquote.Split(initDBFlagsString) if err != nil { - log.Error(err, "Error while parsing initdb flags") + contextLogger.Error(err, "Error while parsing initdb flags") return err } postInitSQL, err := shellquote.Split(postInitSQLStr) if err != nil { - log.Error(err, "Error while parsing post init SQL queries") + contextLogger.Error(err, "Error while parsing post init SQL queries") return err } postInitApplicationSQL, err := shellquote.Split(postInitApplicationSQLStr) if err != nil { - log.Error(err, "Error while parsing post init template SQL queries") + contextLogger.Error(err, "Error while parsing post init template SQL queries") return err } postInitTemplateSQL, err := shellquote.Split(postInitTemplateSQLStr) if err != nil { - log.Error(err, "Error while parsing post init template SQL queries") + contextLogger.Error(err, "Error while parsing post init template SQL queries") return err } @@ -116,9 +120,9 @@ func NewCmd() *cobra.Command { }, } - cmd.Flags().StringVar(&appDBName, "app-db-name", "app", + cmd.Flags().StringVar(&appDBName, "app-db-name", "", "The name of the application containing the database") - cmd.Flags().StringVar(&appUser, "app-user", "app", + cmd.Flags().StringVar(&appUser, "app-user", "", "The name of the application user") cmd.Flags().StringVar(&clusterName, "cluster-name", os.Getenv("CLUSTER_NAME"), "The name of the "+ "current cluster in k8s, used to coordinate switchover and failover") @@ -148,14 +152,15 @@ func NewCmd() *cobra.Command { } func initSubCommand(ctx context.Context, info postgres.InitInfo) error { - err := info.CheckTargetDataDirectory(ctx) + contextLogger := log.FromContext(ctx) + err := info.EnsureTargetDirectoriesDoNotExist(ctx) if err != nil { return err } err = info.Bootstrap(ctx) if err != nil { - log.Error(err, "Error while bootstrapping data directory") + contextLogger.Error(err, "Error while bootstrapping data directory") return err } diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go index 424ff8bb41..dbea010f2c 100644 --- a/internal/cmd/manager/instance/join/cmd.go +++ b/internal/cmd/manager/instance/join/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package join implements the "instance join" subcommand of the operator @@ -26,12 +29,11 @@ import ( ctrl "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller" "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" + instancecertificate "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/certificate" ) // NewCmd creates the new "join" command @@ -53,14 +55,13 @@ func NewCmd() *cobra.Command { }, RunE: func(cmd *cobra.Command, _ []string) error { ctx := cmd.Context() - instance := postgres.NewInstance() - - // The following are needed to correctly + // The fields in the instance are needed to correctly // download the secret containing the TLS // certificates - instance.Namespace = namespace - instance.PodName = podName - instance.ClusterName = clusterName + instance := postgres.NewInstance(). + WithNamespace(namespace). + WithPodName(podName). + WithClusterName(clusterName) info := postgres.InitInfo{ PgData: pgData, @@ -94,45 +95,37 @@ func NewCmd() *cobra.Command { } func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postgres.InitInfo) error { - if err := info.CheckTargetDataDirectory(ctx); err != nil { + contextLogger := log.FromContext(ctx) + + if err := info.EnsureTargetDirectoriesDoNotExist(ctx); err != nil { return err } client, err := management.NewControllerRuntimeClient() if err != nil { - log.Error(err, "Error creating Kubernetes client") + contextLogger.Error(err, "Error creating Kubernetes client") return err } - // Create a fake reconciler just to download the secrets and - // the cluster definition - metricExporter := metricserver.NewExporter(instance) - reconciler := controller.NewInstanceReconciler(instance, client, metricExporter) - // Download the cluster definition from the API server var cluster apiv1.Cluster - if err := reconciler.GetClient().Get(ctx, - ctrl.ObjectKey{Namespace: instance.Namespace, Name: instance.ClusterName}, + if err := client.Get(ctx, + ctrl.ObjectKey{Namespace: instance.GetNamespaceName(), Name: instance.GetClusterName()}, &cluster, ); err != nil { - log.Error(err, "Error while getting cluster") + contextLogger.Error(err, "Error while getting cluster") return err } + instance.Cluster = &cluster - // Since we're directly using the reconciler here, we cannot - // tell if the secrets were correctly downloaded or not. - // If they were the following "pg_basebackup" command will work, if - // they don't "pg_basebackup" with fail, complaining that the - // cryptographic material is not available. - // So it doesn't make a real difference. - // - // Besides this, we should improve this situation to have - // a real error handling. 
- reconciler.RefreshSecrets(ctx, &cluster) + if _, err := instancecertificate.NewReconciler(client, instance).RefreshSecrets(ctx, &cluster); err != nil { + contextLogger.Error(err, "Error while refreshing secrets") + return err + } // Run "pg_basebackup" to download the data directory from the primary - if err := info.Join(&cluster); err != nil { - log.Error(err, "Error joining node") + if err := info.Join(ctx, &cluster); err != nil { + contextLogger.Error(err, "Error joining node") return err } diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go index 433770cec6..8196994f71 100644 --- a/internal/cmd/manager/instance/pgbasebackup/cmd.go +++ b/internal/cmd/manager/instance/pgbasebackup/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package pgbasebackup implement the pgbasebackup bootstrap method @@ -57,7 +60,10 @@ func NewCmd() *cobra.Command { Namespace: namespace, }) }, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + contextLogger := log.FromContext(ctx) + client, err := management.NewControllerRuntimeClient() if err != nil { return err @@ -73,10 +79,8 @@ func NewCmd() *cobra.Command { client: client, } - ctx := context.Background() - if err = env.bootstrapUsingPgbasebackup(ctx); err != nil { - log.Error(err, "Unable to boostrap cluster") + contextLogger.Error(err, "Unable to bootstrap cluster") } return err }, @@ -128,22 +132,13 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { return err } - pgVersion, err := cluster.GetPostgresqlVersion() - if err != nil { - log.Warning( - "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", - "imageName", cluster.GetImageName(), - "err", err) - } else if pgVersion >= 120000 { - // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. - // A short timeout could not be enough in case the instance is slow to send data, - // like when the I/O is overloaded. - connectionString += " options='-c wal_sender_timeout=0s'" - } + // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. + // A short timeout could not be enough in case the instance is slow to send data, + // like when the I/O is overloaded. 
+ connectionString += " options='-c wal_sender_timeout=0s'" - err = postgres.ClonePgData(connectionString, env.info.PgData, env.info.PgWal) - if err != nil { - return err + if err := postgres.ClonePgData(ctx, connectionString, env.info.PgData, env.info.PgWal); err != nil { + return fmt.Errorf("while cloning pgdata: %w", err) } if cluster.IsReplica() { @@ -158,11 +153,11 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { // configureInstanceAsNewPrimary sets up this instance as a new primary server, using // the configuration created by the user and setting up the global objects as needed func (env *CloneInfo) configureInstanceAsNewPrimary(ctx context.Context, cluster *apiv1.Cluster) error { - if err := env.info.WriteInitialPostgresqlConf(cluster); err != nil { + if err := env.info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { return err } - if err := env.info.WriteRestoreHbaConf(); err != nil { + if err := env.info.WriteRestoreHbaConf(ctx); err != nil { return err } diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go index 4d94211861..2cdea6e1c8 100644 --- a/internal/cmd/manager/instance/restore/cmd.go +++ b/internal/cmd/manager/instance/restore/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,10 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ -// Package restore implements the "instance restore" subcommand of the operator package restore import ( @@ -22,46 +24,92 @@ import ( "errors" "os" - barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" - "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/spf13/cobra" - ctrl "sigs.k8s.io/controller-runtime/pkg/client" - + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" ) // NewCmd creates the "restore" subcommand func NewCmd() *cobra.Command { - var clusterName string - var namespace string - var pgData string - var pgWal string + var ( + clusterName string + namespace string + pgData string + pgWal string + ) cmd := &cobra.Command{ Use: "restore [flags]", SilenceErrors: true, - PreRunE: func(cmd *cobra.Command, _ []string) error { - return management.WaitForGetCluster(cmd.Context(), ctrl.ObjectKey{ - Name: clusterName, - Namespace: namespace, - }) - }, RunE: func(cmd *cobra.Command, _ []string) error { - ctx := cmd.Context() + contextLogger := log.FromContext(cmd.Context()) + + // Canceling this context + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + // Step 1: build the manager + mgr, err := buildManager(clusterName, namespace) + if err != nil { + contextLogger.Error(err, "while building the manager") + return err + } + + // Step 1.1: add the local webserver to the manager + localSrv, err := webserver.NewLocalWebServer( + postgres.NewInstance().WithClusterName(clusterName).WithNamespace(namespace), + mgr.GetClient(), + mgr.GetEventRecorderFor("local-webserver"), + ) + if err != nil { + return err + } + if err = mgr.Add(localSrv); err != nil { + contextLogger.Error(err, "unable to add local webserver runnable") + return err + } - info := postgres.InitInfo{ - ClusterName: clusterName, - Namespace: namespace, - PgData: pgData, - PgWal: pgWal, + // Step 2: add the restore process to the manager + restoreProcess := restoreRunnable{ + cli: mgr.GetClient(), + clusterName: clusterName, + namespace: namespace, + pgData: pgData, + pgWal: pgWal, + cancel: cancel, + } + if mgr.Add(&restoreProcess) != nil { + contextLogger.Error(err, "while building the restore process") + return err } - return restoreSubCommand(ctx, info) + // Step 3: start everything + if err := mgr.Start(ctx); err != nil { + contextLogger.Error(err, "restore error") + return err + } + + if !errors.Is(ctx.Err(), context.Canceled) { + contextLogger.Error(err, "error while recovering backup") + return err + } + + return nil }, + PostRunE: func(cmd *cobra.Command, _ []string) error { if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil { return err @@ -81,37 +129,32 @@ func NewCmd() *cobra.Command { return cmd } -func restoreSubCommand(ctx context.Context, 
info postgres.InitInfo) error { - err := info.CheckTargetDataDirectory(ctx) - if err != nil { - return err - } - - err = info.Restore(ctx) - if err != nil { - log.Error(err, "Error while restoring a backup") - cleanupDataDirectoryIfNeeded(err, info.PgData) - return err - } - - return nil -} - -func cleanupDataDirectoryIfNeeded(restoreError error, dataDirectory string) { - var barmanError *barmanCommand.CloudRestoreError - if !errors.As(restoreError, &barmanError) { - return - } - - if !barmanError.IsRetriable() { - return - } - - log.Info("Cleaning up data directory", "directory", dataDirectory) - if err := fileutils.RemoveDirectory(dataDirectory); err != nil && !os.IsNotExist(err) { - log.Error( - err, - "error occurred cleaning up data directory", - "directory", dataDirectory) - } +func buildManager(clusterName string, namespace string) (manager.Manager, error) { + return controllerruntime.NewManager(controllerruntime.GetConfigOrDie(), controllerruntime.Options{ + Scheme: scheme.BuildWithAllKnownScheme(), + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &apiv1.Cluster{}: { + Field: fields.OneTermEqualSelector("metadata.name", clusterName), + Namespaces: map[string]cache.Config{ + namespace: {}, + }, + }, + }, + }, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.Secret{}, + &corev1.ConfigMap{}, + // todo(armru): we should remove the backup endpoints from the local webserver + &apiv1.Backup{}, + }, + }, + }, + LeaderElection: false, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + }) } diff --git a/internal/cmd/manager/instance/restore/doc.go b/internal/cmd/manager/instance/restore/doc.go new file mode 100644 index 0000000000..5a94c00223 --- /dev/null +++ b/internal/cmd/manager/instance/restore/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package restore implements the "instance restore" subcommand of the operator +package restore diff --git a/internal/cmd/manager/instance/restore/restore.go b/internal/cmd/manager/instance/restore/restore.go new file mode 100644 index 0000000000..ad9d90e33f --- /dev/null +++ b/internal/cmd/manager/instance/restore/restore.go @@ -0,0 +1,108 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package restore + +import ( + "context" + "errors" + "fmt" + "os" + + barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" + "github.com/cloudnative-pg/machinery/pkg/fileutils" + "github.com/cloudnative-pg/machinery/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +type restoreRunnable struct { + cli client.Client + clusterName string + namespace string + pgData string + pgWal string + cancel context.CancelFunc +} + +func (r *restoreRunnable) Start(ctx context.Context) error { + // we will wait this way for the mgr and informers to be online + if err := management.WaitForGetClusterWithClient(ctx, r.cli, client.ObjectKey{ + Name: r.clusterName, + Namespace: r.namespace, + }); err != nil { + return fmt.Errorf("while waiting for API server connectivity: %w", err) + } + + info := postgres.InitInfo{ + ClusterName: r.clusterName, + Namespace: r.namespace, + PgData: r.pgData, + PgWal: r.pgWal, + } + + if err := restoreSubCommand(ctx, info, r.cli); err != nil { + return fmt.Errorf("while restoring cluster: %s", err) + } + + // the backup was restored correctly and we now ask + // the manager to quit + r.cancel() + return nil +} + +func restoreSubCommand(ctx context.Context, info postgres.InitInfo, cli client.Client) error { + contextLogger := log.FromContext(ctx) + if err := info.EnsureTargetDirectoriesDoNotExist(ctx); err != nil { + return err + } + + if err := info.Restore(ctx, cli); err != nil { + contextLogger.Error(err, "Error while restoring a backup") + cleanupDataDirectoryIfNeeded(ctx, err, info.PgData) + return err + } + + contextLogger.Info("restore command execution completed without errors") + + return nil +} + +func cleanupDataDirectoryIfNeeded(ctx context.Context, restoreError error, dataDirectory string) { + contextLogger := log.FromContext(ctx) + + var barmanError *barmanCommand.CloudRestoreError + if !errors.As(restoreError, &barmanError) { + return + } + + if !barmanError.IsRetriable() { + return + } + + contextLogger.Info("Cleaning up data directory", "directory", dataDirectory) + if err := fileutils.RemoveDirectory(dataDirectory); err != nil && !os.IsNotExist(err) { + contextLogger.Error( + err, + "error occurred cleaning up data directory", + "directory", dataDirectory) + } +} diff --git a/internal/cmd/manager/instance/restoresnapshot/cmd.go b/internal/cmd/manager/instance/restoresnapshot/cmd.go index d3e22e890c..72ea1d1910 100644 --- a/internal/cmd/manager/instance/restoresnapshot/cmd.go +++ b/internal/cmd/manager/instance/restoresnapshot/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,24 +13,34 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-// Package restoresnapshot implements the "instance restoresnapshot" subcommand of the operator
 package restoresnapshot
 
 import (
 	"context"
 	"encoding/base64"
+	"errors"
 	"os"
 
 	"github.com/cloudnative-pg/machinery/pkg/log"
 	"github.com/spf13/cobra"
-	ctrl "sigs.k8s.io/controller-runtime/pkg/client"
-
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	controllerruntime "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/internal/management/istio"
 	"github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd"
-	"github.com/cloudnative-pg/cloudnative-pg/pkg/management"
+	"github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
 )
 
 // NewCmd creates the "restoresnapshot" subcommand
@@ -47,20 +58,47 @@ func NewCmd() *cobra.Command {
 	cmd := &cobra.Command{
 		Use:           "restoresnapshot [flags]",
 		SilenceErrors: true,
-		PreRunE: func(cmd *cobra.Command, _ []string) error {
-			return management.WaitForGetCluster(cmd.Context(), ctrl.ObjectKey{
-				Name:      clusterName,
-				Namespace: namespace,
-			})
-		},
 		RunE: func(cmd *cobra.Command, _ []string) error {
-			ctx := cmd.Context()
+			contextLogger := log.FromContext(cmd.Context())
+
+			// Canceling this context stops the manager once the restore
+			// process has completed
+			ctx, cancel := context.WithCancel(cmd.Context())
+			defer cancel()
+
+			// Step 1: build the manager
+			mgr, err := buildManager(clusterName, namespace)
+			if err != nil {
+				contextLogger.Error(err, "while building the manager")
+				return err
+			}
+
+			// Step 1.1: add the local webserver to the manager
+			localSrv, err := webserver.NewLocalWebServer(
+				postgres.NewInstance().WithClusterName(clusterName).WithNamespace(namespace),
+				mgr.GetClient(),
+				mgr.GetEventRecorderFor("local-webserver"),
+			)
+			if err != nil {
+				return err
+			}
+			if err = mgr.Add(localSrv); err != nil {
+				contextLogger.Error(err, "unable to add local webserver runnable")
+				return err
+			}
 
-			info := postgres.InitInfo{
-				ClusterName: clusterName,
-				Namespace:   namespace,
-				PgData:      pgData,
-				PgWal:       pgWal,
+			// Step 2: add the restore process to the manager
+			restoreProcess := restoreRunnable{
+				cli:         mgr.GetClient(),
+				clusterName: clusterName,
+				namespace:   namespace,
+				pgData:      pgData,
+				pgWal:       pgWal,
+				immediate:   immediate,
+				cancel:      cancel,
+			}
+			if err := mgr.Add(&restoreProcess); err != nil {
+				contextLogger.Error(err, "while adding the restore process to the manager")
+				return err
 			}
 
 			if backupLabel != "" {
@@ -68,7 +106,7 @@ func NewCmd() *cobra.Command {
 				if err != nil {
 					return err
 				}
-				info.BackupLabelFile = res
+				restoreProcess.backupLabelFile = res
 			}
 
 			if tablespaceMap != "" {
@@ -76,15 +114,23 @@ func NewCmd() *cobra.Command {
 				if err != nil {
 					return err
 				}
-				info.TablespaceMapFile = res
+				restoreProcess.tablespaceMapFile = res
 			}
 
-			err := execute(ctx, info, immediate)
-			if err != nil {
-				log.Error(err, "Error while recovering Volume Snapshot backup")
+			// Step 3: start everything
+			if err := mgr.Start(ctx); err != nil {
+				contextLogger.Error(err, "restore error")
+				return err
+			}
+
+			// If the restore runnable did not cancel the context, the manager
+			// stopped before the recovery could complete
+			if !errors.Is(ctx.Err(), context.Canceled) {
+				err := errors.New("the restore process terminated prematurely")
+				contextLogger.Error(err, "error while recovering backup")
+				return err
 			}
-			return err
+
+			return nil
 		},
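 		// PostRunE (below) asks any service-mesh sidecar injected next to
 		// the restore Job (e.g. Istio) to quit, so the Pod can reach
 		// completion once the restore is done.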
PostRunE: func(cmd *cobra.Command, _ []string) error { if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil { return err @@ -107,11 +153,32 @@ func NewCmd() *cobra.Command { return cmd } -func execute(ctx context.Context, info postgres.InitInfo, immediate bool) error { - typedClient, err := management.NewControllerRuntimeClient() - if err != nil { - return err - } - - return info.RestoreSnapshot(ctx, typedClient, immediate) +func buildManager(clusterName string, namespace string) (manager.Manager, error) { + return controllerruntime.NewManager(controllerruntime.GetConfigOrDie(), controllerruntime.Options{ + Scheme: scheme.BuildWithAllKnownScheme(), + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &apiv1.Cluster{}: { + Field: fields.OneTermEqualSelector("metadata.name", clusterName), + Namespaces: map[string]cache.Config{ + namespace: {}, + }, + }, + }, + }, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.Secret{}, + &corev1.ConfigMap{}, + // todo(armru): we should remove the backup endpoints from the local webserver + &apiv1.Backup{}, + }, + }, + }, + LeaderElection: false, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + }) } diff --git a/internal/cmd/manager/instance/restoresnapshot/doc.go b/internal/cmd/manager/instance/restoresnapshot/doc.go index ecd62ffa13..be6dbbe67b 100644 --- a/internal/cmd/manager/instance/restoresnapshot/doc.go +++ b/internal/cmd/manager/instance/restoresnapshot/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package restoresnapshot implements the job command that bootstraps the snapshot volumes diff --git a/internal/cmd/manager/instance/restoresnapshot/restore.go b/internal/cmd/manager/instance/restoresnapshot/restore.go new file mode 100644 index 0000000000..1a065b12ce --- /dev/null +++ b/internal/cmd/manager/instance/restoresnapshot/restore.go @@ -0,0 +1,74 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package restoresnapshot
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+)
+
+type restoreRunnable struct {
+	cli               client.Client
+	clusterName       string
+	namespace         string
+	pgData            string
+	pgWal             string
+	backupLabelFile   []byte
+	tablespaceMapFile []byte
+	immediate         bool
+	cancel            context.CancelFunc
+}
+
+func (r *restoreRunnable) Start(ctx context.Context) error {
+	contextLogger := log.FromContext(ctx)
+
+	// Wait for the manager and its informers to come online
+	if err := management.WaitForGetClusterWithClient(ctx, r.cli, client.ObjectKey{
+		Name:      r.clusterName,
+		Namespace: r.namespace,
+	}); err != nil {
+		return fmt.Errorf("while waiting for API server connectivity: %w", err)
+	}
+
+	info := postgres.InitInfo{
+		ClusterName:       r.clusterName,
+		Namespace:         r.namespace,
+		PgData:            r.pgData,
+		PgWal:             r.pgWal,
+		BackupLabelFile:   r.backupLabelFile,
+		TablespaceMapFile: r.tablespaceMapFile,
+	}
+
+	if err := info.RestoreSnapshot(ctx, r.cli, r.immediate); err != nil {
+		contextLogger.Error(err, "Error while restoring a backup")
+		return err
+	}
+
+	// The backup was restored correctly; ask the manager to quit
+	r.cancel()
+	return nil
+}
diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go
index f1007407ca..2d8713beed 100644
--- a/internal/cmd/manager/instance/run/cmd.go
+++ b/internal/cmd/manager/instance/run/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ // Package run implements the "instance run" subcommand of the operator @@ -23,6 +26,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/spf13/cobra" @@ -39,6 +43,8 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/run/lifecycle" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/externalservers" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/roles" @@ -50,18 +56,24 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/logpipe" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/metrics" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" pg "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + instancestorage "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/storage" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) var ( scheme = runtime.NewScheme() - // errNoFreeWALSpace is raised when there's not enough disk space - // to store two WAL files + // errNoFreeWALSpace is returned when there isn't enough disk space + // available to store at least two WAL files. errNoFreeWALSpace = fmt.Errorf("no free disk space for WALs") + + // errWALArchivePluginNotAvailable is returned when the configured + // WAL archiving plugin is not available or cannot be found. + errWALArchivePluginNotAvailable = fmt.Errorf("WAL archive plugin not available") ) func init() { @@ -87,13 +99,16 @@ func NewCmd() *cobra.Command { }) }, RunE: func(cmd *cobra.Command, _ []string) error { - ctx := log.IntoContext(cmd.Context(), log.GetLogger()) - instance := postgres.NewInstance() + ctx := log.IntoContext( + cmd.Context(), + log.GetLogger().WithValues("logger", "instance-manager"), + ) + instance := postgres.NewInstance(). + WithPodName(podName). + WithClusterName(clusterName). 
+ WithNamespace(namespace) instance.PgData = pgData - instance.Namespace = namespace - instance.PodName = podName - instance.ClusterName = clusterName instance.StatusPortTLS = statusPortTLS instance.MetricsPortTLS = metricsPortTLS @@ -104,6 +119,9 @@ func NewCmd() *cobra.Command { if errors.Is(err, errNoFreeWALSpace) { os.Exit(apiv1.MissingWALDiskSpaceExitCode) } + if errors.Is(err, errWALArchivePluginNotAvailable) { + os.Exit(apiv1.MissingWALArchivePlugin) + } return err }, @@ -130,20 +148,20 @@ func NewCmd() *cobra.Command { return cmd } -func runSubCommand(ctx context.Context, instance *postgres.Instance) error { +func runSubCommand(ctx context.Context, instance *postgres.Instance) error { //nolint:gocognit,gocyclo var err error - setupLog := log.WithName("setup") - setupLog.Info("Starting CloudNativePG Instance Manager", + contextLogger := log.FromContext(ctx) + contextLogger.Info("Starting CloudNativePG Instance Manager", "version", versions.Version, "build", versions.Info) - setupLog.Info("Checking for free disk space for WALs before starting PostgreSQL") + contextLogger.Info("Checking for free disk space for WALs before starting PostgreSQL") hasDiskSpaceForWals, err := instance.CheckHasDiskSpaceForWAL(ctx) if err != nil { - setupLog.Error(err, "Error while checking if there is enough disk space for WALs, skipping") + contextLogger.Error(err, "Error while checking if there is enough disk space for WALs, skipping") } else if !hasDiskSpaceForWals { - setupLog.Info("Detected low-disk space condition, avoid starting the instance") + contextLogger.Info("Detected low-disk space condition, avoid starting the instance") return errNoFreeWALSpace } @@ -152,14 +170,24 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { Cache: cache.Options{ ByObject: map[client.Object]cache.ByObject{ &apiv1.Cluster{}: { - Field: fields.OneTermEqualSelector("metadata.name", instance.ClusterName), + Field: fields.OneTermEqualSelector("metadata.name", instance.GetClusterName()), Namespaces: map[string]cache.Config{ - instance.Namespace: {}, + instance.GetNamespaceName(): {}, }, }, &apiv1.Database{}: { Namespaces: map[string]cache.Config{ - instance.Namespace: {}, + instance.GetNamespaceName(): {}, + }, + }, + &apiv1.Publication{}: { + Namespaces: map[string]cache.Config{ + instance.GetNamespaceName(): {}, + }, + }, + &apiv1.Subscription{}: { + Namespaces: map[string]cache.Config{ + instance.GetNamespaceName(): {}, }, }, }, @@ -174,39 +202,67 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { // we don't have the permissions to cache backups, as the ServiceAccount // doesn't have watch permission on the backup status &apiv1.Backup{}, + // we don't have the permissions to cache FailoverQuorum objects, we can + // only access the object having the same name as the cluster + &apiv1.FailoverQuorum{}, }, }, }, Metrics: server.Options{ BindAddress: "0", // TODO: merge metrics to the manager one }, + BaseContext: func() context.Context { + return ctx + }, + Logger: contextLogger.WithValues("logging_pod", os.Getenv("POD_NAME")).GetLogger(), }) if err != nil { - setupLog.Error(err, "unable to set up overall controller manager") + contextLogger.Error(err, "unable to set up overall controller manager") return err } postgresStartConditions := concurrency.MultipleExecuted{} exitedConditions := concurrency.MultipleExecuted{} - metricsExporter := metricserver.NewExporter(instance) - reconciler := controller.NewInstanceReconciler(instance, mgr.GetClient(), metricsExporter) + 
var loadedPluginNames []string + pluginRepository := repository.New() + if loadedPluginNames, err = pluginRepository.RegisterUnixSocketPluginsInPath( + configuration.Current.PluginSocketDir, + ); err != nil { + contextLogger.Error(err, "Unable to load sidecar CNPG-i plugins, skipping") + } + defer pluginRepository.Close() + + metricsExporter := metricserver.NewExporter(instance, metrics.NewPluginCollector(pluginRepository)) + reconciler := controller.NewInstanceReconciler(instance, mgr.GetClient(), metricsExporter, pluginRepository) err = ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). + Named("instance-cluster"). Complete(reconciler) if err != nil { - setupLog.Error(err, "unable to create instance controller") + contextLogger.Error(err, "unable to create instance controller") return err } postgresStartConditions = append(postgresStartConditions, reconciler.GetExecutedCondition()) // database reconciler dbReconciler := controller.NewDatabaseReconciler(mgr, instance) - err = ctrl.NewControllerManagedBy(mgr). - For(&apiv1.Database{}). - Complete(dbReconciler) - if err != nil { - setupLog.Error(err, "unable to create database controller") + if err := dbReconciler.SetupWithManager(mgr); err != nil { + contextLogger.Error(err, "unable to create database controller") + return err + } + + // database publication reconciler + publicationReconciler := controller.NewPublicationReconciler(mgr, instance) + if err := publicationReconciler.SetupWithManager(mgr); err != nil { + contextLogger.Error(err, "unable to create publication controller") + return err + } + + // database subscription reconciler + subscriptionReconciler := controller.NewSubscriptionReconciler(mgr, instance) + if err := subscriptionReconciler.SetupWithManager(mgr); err != nil { + contextLogger.Error(err, "unable to create subscription controller") return err } @@ -235,30 +291,30 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { postgresStartConditions = append(postgresStartConditions, jsonPipe.GetExecutedCondition()) exitedConditions = append(exitedConditions, jsonPipe.GetExitedCondition()) - if err := reconciler.ReconcileWalStorage(ctx); err != nil { + if err := instancestorage.ReconcileWalDirectory(ctx); err != nil { return err } postgresLifecycleManager := lifecycle.NewPostgres(ctx, instance, postgresStartConditions) if err = mgr.Add(postgresLifecycleManager); err != nil { - setupLog.Error(err, "unable to create instance runnable") + contextLogger.Error(err, "unable to create instance runnable") return err } if err = mgr.Add(lifecycle.NewPostgresOrphansReaper(instance)); err != nil { - setupLog.Error(err, "unable to create zombie reaper") + contextLogger.Error(err, "unable to create zombie reaper") return err } slotReplicator := runner.NewReplicator(instance) if err = mgr.Add(slotReplicator); err != nil { - setupLog.Error(err, "unable to create slot replicator") + contextLogger.Error(err, "unable to create slot replicator") return err } roleSynchronizer := roles.NewRoleSynchronizer(instance, reconciler.GetClient()) if err = mgr.Add(roleSynchronizer); err != nil { - setupLog.Error(err, "unable to create role synchronizer") + contextLogger.Error(err, "unable to create role synchronizer") return err } @@ -275,7 +331,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { return err } if err = mgr.Add(remoteSrv); err != nil { - setupLog.Error(err, "unable to add remote webserver runnable") + contextLogger.Error(err, "unable to add remote webserver runnable") return err } 
@@ -288,7 +344,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error {
 		return err
 	}
 	if err = mgr.Add(localSrv); err != nil {
-		setupLog.Error(err, "unable to add local webserver runnable")
+		contextLogger.Error(err, "unable to add local webserver runnable")
 		return err
 	}
 
@@ -297,37 +353,48 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error {
 		return err
 	}
 	if err = mgr.Add(metricsServer); err != nil {
-		setupLog.Error(err, "unable to add local webserver runnable")
+		contextLogger.Error(err, "unable to add metrics server runnable")
 		return err
 	}
 
-	setupLog.Info("starting tablespace manager")
+	contextLogger.Info("starting tablespace manager")
 	if err := tablespaces.NewTablespaceReconciler(instance, mgr.GetClient()).
 		SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create tablespace reconciler")
+		contextLogger.Error(err, "unable to create tablespace reconciler")
 		return err
 	}
 
-	setupLog.Info("starting external server manager")
+	contextLogger.Info("starting external server manager")
 	if err := externalservers.NewReconciler(instance, mgr.GetClient()).
 		SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create external servers reconciler")
+		contextLogger.Error(err, "unable to create external servers reconciler")
 		return err
 	}
 
-	setupLog.Info("starting controller-runtime manager")
+	contextLogger.Info("starting controller-runtime manager")
 	if err := mgr.Start(onlineUpgradeCtx); err != nil {
-		setupLog.Error(err, "unable to run controller-runtime manager")
+		contextLogger.Error(err, "unable to run controller-runtime manager")
 		return makeUnretryableError(err)
 	}
 
-	setupLog.Info("Checking for free disk space for WALs after PostgreSQL finished")
+	contextLogger.Info("Checking for free disk space for WALs after PostgreSQL finished")
 	hasDiskSpaceForWals, err = instance.CheckHasDiskSpaceForWAL(ctx)
 	if err != nil {
-		setupLog.Error(err, "Error while checking if there is enough disk space for WALs, skipping")
+		contextLogger.Error(err, "Error while checking if there is enough disk space for WALs, skipping")
 	} else if !hasDiskSpaceForWals {
-		setupLog.Info("Detected low-disk space condition")
-		return errNoFreeWALSpace
+		contextLogger.Info("Detected low-disk space condition")
+		return makeUnretryableError(errNoFreeWALSpace)
+	}
+
+	if instance.Cluster != nil {
+		enabledArchiverPluginName := instance.Cluster.GetEnabledWALArchivePluginName()
+		if enabledArchiverPluginName != "" && !slices.Contains(loadedPluginNames, enabledArchiverPluginName) {
+			contextLogger.Info(
+				"Detected missing WAL archiver plugin, waiting for the operator to roll out a new instance Pod",
+				"enabledArchiverPluginName", enabledArchiverPluginName,
+				"loadedPluginNames", loadedPluginNames)
+			return makeUnretryableError(errWALArchivePluginNotAvailable)
+		}
 	}
 
 	return nil
diff --git a/internal/cmd/manager/instance/run/errors.go b/internal/cmd/manager/instance/run/errors.go
index 2b47796d8a..44900d23f2 100644
--- a/internal/cmd/manager/instance/run/errors.go
+++ b/internal/cmd/manager/instance/run/errors.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package run diff --git a/internal/cmd/manager/instance/run/errors_test.go b/internal/cmd/manager/instance/run/errors_test.go index 063059d2fd..548dd90e57 100644 --- a/internal/cmd/manager/instance/run/errors_test.go +++ b/internal/cmd/manager/instance/run/errors_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package run diff --git a/internal/cmd/manager/instance/run/lifecycle/doc.go b/internal/cmd/manager/instance/run/lifecycle/doc.go index e092b18ce1..e00e9d7698 100644 --- a/internal/cmd/manager/instance/run/lifecycle/doc.go +++ b/internal/cmd/manager/instance/run/lifecycle/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package lifecycle contains the code to run and manage the lifecycle of a postgres Instance diff --git a/internal/cmd/manager/instance/run/lifecycle/lifecycle.go b/internal/cmd/manager/instance/run/lifecycle/lifecycle.go index 1e223e3e22..cc351ff90d 100644 --- a/internal/cmd/manager/instance/run/lifecycle/lifecycle.go +++ b/internal/cmd/manager/instance/run/lifecycle/lifecycle.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package lifecycle diff --git a/internal/cmd/manager/instance/run/lifecycle/reaper.go b/internal/cmd/manager/instance/run/lifecycle/reaper.go index 68f32ba3ea..7a4f47b4cd 100644 --- a/internal/cmd/manager/instance/run/lifecycle/reaper.go +++ b/internal/cmd/manager/instance/run/lifecycle/reaper.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package lifecycle
@@ -99,7 +102,7 @@ func (z *PostgresOrphansReaper) handleSignal(contextLogger log.Logger) error {
 	pidFile := path.Join(z.instance.PgData, postgres.PostgresqlPidFile)
 	_, postMasterPid, _ := z.instance.GetPostmasterPidFromFile(pidFile)
 	for _, p := range processes {
-		if p.PPid() == 1 && p.Executable() == "postgres" {
+		if p.PPid() == 1 && p.Executable() == postgres.GetPostgresExecutableName() {
 			pid := p.Pid()
 			if pid == postMasterPid {
 				continue
diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go
index 88a133dec7..83c08e3ecd 100644
--- a/internal/cmd/manager/instance/run/lifecycle/run.go
+++ b/internal/cmd/manager/instance/run/lifecycle/run.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package lifecycle
@@ -19,6 +22,7 @@ package lifecycle
 import (
 	"context"
 	"database/sql"
+	"errors"
 	"fmt"
 	"sync"
 
@@ -27,7 +31,6 @@ import (
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
-	postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils"
 )
 
 var identifierStreamingReplicationUser = pgx.Identifier{apiv1.StreamingReplicationUser}.Sanitize()
@@ -74,6 +77,12 @@ func (i *PostgresLifecycle) runPostgresAndWait(ctx context.Context) <-chan error
 	// following will be a no-op.
 	i.systemInitialization.Wait()
 
+	// If the system initialization failed, we return an error and let
+	// the instance manager quit.
+	if err := i.systemInitialization.Err(); err != nil {
+		return err
+	}
+
 	// The lifecycle loop will call us even when PostgreSQL is fenced.
 	// In that case there's no need to proceed.
 	if i.instance.IsFenced() {
@@ -98,7 +107,7 @@ func (i *PostgresLifecycle) runPostgresAndWait(ctx context.Context) <-chan error
 		return err
 	}
 
-	log.Info("postmaster started", "postMasterPID", postMasterPID)
+	contextLogger.Info("postmaster started", "postMasterPID", postMasterPID)
 
 	// Now we'll wait for PostgreSQL to accept connections, and set up everything required
 	// for replication and pg_rewind to work correctly.
@@ -116,7 +125,11 @@ func (i *PostgresLifecycle) runPostgresAndWait(ctx context.Context) <-chan error defer i.instance.SetCanCheckReadiness(false) postmasterExitStatus := streamingCmd.Wait() - log.Info("postmaster exited", "postmasterExitStatus", postmasterExitStatus, "postMasterPID", postMasterPID) + contextLogger.Info( + "postmaster exited", + "postmasterExitStatus", postmasterExitStatus, + "postMasterPID", postMasterPID, + ) return postmasterExitStatus } @@ -145,11 +158,6 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan return nil } - majorVersion, err := postgresutils.GetMajorVersion(instance.PgData) - if err != nil { - return fmt.Errorf("while getting major version: %w", err) - } - db, err := instance.GetSuperUserDB() if err != nil { return fmt.Errorf("while getting a connection to the instance: %w", err) @@ -169,14 +177,12 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan return fmt.Errorf("creating a new transaction to setup the instance: %w", err) } - hasSuperuser, err := configureStreamingReplicaUser(tx) - if err != nil { + if err := configureStreamingReplicaUser(tx); err != nil { _ = tx.Rollback() return err } - err = configurePgRewindPrivileges(majorVersion, hasSuperuser, tx) - if err != nil { + if err = configurePgRewindPrivileges(tx); err != nil { _ = tx.Rollback() return err } @@ -186,28 +192,28 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan // configureStreamingReplicaUser makes sure the streaming replication user exists // and has the required rights -func configureStreamingReplicaUser(tx *sql.Tx) (bool, error) { - var hasLoginRight, hasReplicationRight, hasSuperuser bool - row := tx.QueryRow("SELECT rolcanlogin, rolreplication, rolsuper FROM pg_roles WHERE rolname = $1", +func configureStreamingReplicaUser(tx *sql.Tx) error { + var hasLoginRight, hasReplicationRight bool + row := tx.QueryRow("SELECT rolcanlogin, rolreplication FROM pg_catalog.pg_roles WHERE rolname = $1", apiv1.StreamingReplicationUser) - err := row.Scan(&hasLoginRight, &hasReplicationRight, &hasSuperuser) + err := row.Scan(&hasLoginRight, &hasReplicationRight) if err != nil { - if err != sql.ErrNoRows { - return false, fmt.Errorf("while creating streaming replication user: %w", err) + if !errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("while getting streaming replication user privileges: %w", err) } _, err = tx.Exec(fmt.Sprintf( "CREATE USER %v REPLICATION", identifierStreamingReplicationUser)) if err != nil { - return false, fmt.Errorf("CREATE USER %v error: %w", apiv1.StreamingReplicationUser, err) + return fmt.Errorf("CREATE USER %v error: %w", apiv1.StreamingReplicationUser, err) } _, err = tx.Exec(fmt.Sprintf( "COMMENT ON ROLE %v IS 'Special user for streaming replication created by CloudNativePG'", identifierStreamingReplicationUser)) if err != nil { - return false, fmt.Errorf("COMMENT ON ROLE %v error: %w", apiv1.StreamingReplicationUser, err) + return fmt.Errorf("COMMENT ON ROLE %v error: %w", apiv1.StreamingReplicationUser, err) } } @@ -216,36 +222,26 @@ func configureStreamingReplicaUser(tx *sql.Tx) (bool, error) { "ALTER USER %v LOGIN REPLICATION", identifierStreamingReplicationUser)) if err != nil { - return false, fmt.Errorf("ALTER USER %v error: %w", apiv1.StreamingReplicationUser, err) + return fmt.Errorf("ALTER USER %v error: %w", apiv1.StreamingReplicationUser, err) } } - return hasSuperuser, nil + return nil } // configurePgRewindPrivileges ensures that the 
StreamingReplicationUser has enough rights to execute pg_rewind -func configurePgRewindPrivileges(majorVersion int, hasSuperuser bool, tx *sql.Tx) error { - // We need the superuser bit for the streaming-replication user since pg_rewind in PostgreSQL <= 10 - // will require it. - if majorVersion <= 10 { - if !hasSuperuser { - _, err := tx.Exec(fmt.Sprintf( - "ALTER USER %v SUPERUSER", - identifierStreamingReplicationUser)) - if err != nil { - return fmt.Errorf("ALTER USER %v error: %w", apiv1.StreamingReplicationUser, err) - } - } - return nil - } - +func configurePgRewindPrivileges(tx *sql.Tx) error { // Ensure the user has rights to execute the functions needed for pg_rewind var hasPgRewindPrivileges bool row := tx.QueryRow( ` - SELECT has_function_privilege($1, 'pg_ls_dir(text, boolean, boolean)', 'execute') AND - has_function_privilege($2, 'pg_stat_file(text, boolean)', 'execute') AND - has_function_privilege($3, 'pg_read_binary_file(text)', 'execute') AND - has_function_privilege($4, 'pg_read_binary_file(text, bigint, bigint, boolean)', 'execute')`, + SELECT pg_catalog.has_function_privilege($1, + 'pg_catalog.pg_ls_dir(text, boolean, boolean)', 'execute') AND + pg_catalog.has_function_privilege($2, + 'pg_catalog.pg_stat_file(text, boolean)', 'execute') AND + pg_catalog.has_function_privilege($3, + 'pg_catalog.pg_read_binary_file(text)', 'execute') AND + pg_catalog.has_function_privilege($4, + 'pg_catalog.pg_read_binary_file(text, bigint, bigint, boolean)', 'execute')`, apiv1.StreamingReplicationUser, apiv1.StreamingReplicationUser, apiv1.StreamingReplicationUser, diff --git a/internal/cmd/manager/instance/run/suite_test.go b/internal/cmd/manager/instance/run/suite_test.go new file mode 100644 index 0000000000..5b0258a8ea --- /dev/null +++ b/internal/cmd/manager/instance/run/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package run + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "instance run test suite") +} diff --git a/internal/cmd/manager/instance/status/cmd.go b/internal/cmd/manager/instance/status/cmd.go index dc56e49b3a..e3a4792f3f 100644 --- a/internal/cmd/manager/instance/status/cmd.go +++ b/internal/cmd/manager/instance/status/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package status implements the "instance status" subcommand of the operator
@@ -29,11 +32,11 @@ import (
 	"github.com/cloudnative-pg/machinery/pkg/log"
 	"github.com/spf13/cobra"
 
-	cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/management"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
-	"github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
 )
 
 // NewCmd creates the "instance status" subcommand
@@ -49,15 +52,16 @@ func NewCmd() *cobra.Command {
 }
 
 func statusSubCommand(ctx context.Context) error {
+	contextLogger := log.FromContext(ctx)
 	cli, err := management.NewControllerRuntimeClient()
 	if err != nil {
-		log.Error(err, "while building the controller runtime client")
+		contextLogger.Error(err, "while building the controller runtime client")
 		return err
 	}
 
-	cluster, err := cacheClient.GetCluster()
+	cluster, err := local.NewClient().Cache().GetCluster()
 	if err != nil {
-		log.Error(err, "while loading the cluster from cache")
+		contextLogger.Error(err, "while loading the cluster from cache")
 		return err
 	}
 
@@ -67,7 +71,7 @@ func statusSubCommand(ctx context.Context) error {
 		cluster.GetServerCASecretObjectKey(),
 	)
 	if err != nil {
-		log.Error(err, "Error while building the TLS context")
+		contextLogger.Error(err, "Error while building the TLS context")
 		return err
 	}
 
@@ -76,14 +80,14 @@ func statusSubCommand(ctx context.Context) error {
 		resp, err = executeRequest(ctx, "http")
 	}
 	if err != nil {
-		log.Error(err, "Error while requesting instance status")
+		contextLogger.Error(err, "Error while requesting instance status")
 		return err
 	}
 
 	defer func() {
 		err = resp.Body.Close()
 		if err != nil {
-			log.Error(err, "Can't close the connection",
+			contextLogger.Error(err, "Can't close the connection",
 				"statusCode", resp.StatusCode,
 			)
 		}
@@ -91,14 +95,14 @@ func statusSubCommand(ctx context.Context) error {
 
 	body, err := io.ReadAll(resp.Body)
 	if err != nil {
-		log.Error(err, "Error while reading status response body",
+		contextLogger.Error(err, "Error while reading status response body",
 			"statusCode", resp.StatusCode,
 		)
 		return err
 	}
 
-	if resp.StatusCode != 200 {
-		log.Info(
+	if resp.StatusCode != http.StatusOK {
+		contextLogger.Info(
 			"Error while extracting status",
 			"statusCode", resp.StatusCode,
 			"body", string(body),
@@ -108,7 +112,7 @@ func statusSubCommand(ctx context.Context) error {
 
 	_, err = os.Stdout.Write(body)
 	if err != nil {
-		log.Error(err, "Error while showing status info")
+		contextLogger.Error(err, "Error while showing status info")
 		return err
 	}
 
@@ -119,15 +123,17 @@ func executeRequest(ctx context.Context, scheme string) (*http.Response, error)
 	const connectionTimeout = 2 * time.Second
 	const requestTimeout = 30 * time.Second
 
+	contextLogger := log.FromContext(ctx)
+
 	statusURL := url.Build(
 		scheme, "localhost", url.PathPgStatus, url.StatusPort,
 	)
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, statusURL, nil)
 	if err != nil {
-		log.Error(err, "Error while building the request")
+		contextLogger.Error(err, "Error while building the request")
 		return nil, err
 	}
-	httpClient := resources.NewHTTPClient(connectionTimeout, requestTimeout)
+	httpClient := common.NewHTTPClient(connectionTimeout, requestTimeout)
+	return
httpClient.Do(req) // nolint:gosec } diff --git a/internal/cmd/manager/instance/upgrade/cmd.go b/internal/cmd/manager/instance/upgrade/cmd.go new file mode 100644 index 0000000000..6275e01445 --- /dev/null +++ b/internal/cmd/manager/instance/upgrade/cmd.go @@ -0,0 +1,40 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package upgrade implements the "instance upgrade" subcommand of the operator +package upgrade + +import ( + "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/upgrade/execute" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/upgrade/prepare" +) + +// NewCmd creates the "instance upgrade" subcommand +func NewCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "upgrade", + } + + cmd.AddCommand(prepare.NewCmd()) + cmd.AddCommand(execute.NewCmd()) + + return cmd +} diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go new file mode 100644 index 0000000000..3ecb21482f --- /dev/null +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -0,0 +1,562 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package execute implements the "instance upgrade execute" subcommand +package execute + +import ( + "context" + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + cnpgiPostgres "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "github.com/cloudnative-pg/machinery/pkg/env" + "github.com/cloudnative-pg/machinery/pkg/execlog" + "github.com/cloudnative-pg/machinery/pkg/fileutils" + "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + "github.com/spf13/cobra" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" + "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" + "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" + postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" + instancecertificate "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/certificate" + instancestorage "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/storage" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// NewCmd creates the cobra command +func NewCmd() *cobra.Command { + var pgData string + var podName string + var clusterName string + var namespace string + var pgUpgrade string + var pgUpgradeArgs []string + var initdb string + var initdbArgs []string + + cmd := &cobra.Command{ + Use: "execute [options]", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + oldBinDirFile := args[0] + ctx := cmd.Context() + + // The fields in the instance are needed to correctly + // download the secret containing the TLS + // certificates + instance := postgres.NewInstance(). + WithNamespace(namespace). + WithPodName(podName). 
+ WithClusterName(clusterName) + + // Read the old bindir from the passed file + oldBinDirBytes, err := fileutils.ReadFile(oldBinDirFile) + if err != nil { + return fmt.Errorf("error while reading the old bindir: %w", err) + } + + oldBinDir := strings.TrimSpace(string(oldBinDirBytes)) + info := upgradeInfo{ + pgData: pgData, + oldBinDir: oldBinDir, + pgUpgrade: pgUpgrade, + pgUpgradeArgs: pgUpgradeArgs, + initdb: initdb, + initdbArgs: initdbArgs, + } + return info.upgradeSubCommand(ctx, instance) + }, + PostRunE: func(cmd *cobra.Command, _ []string) error { + if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil { + return err + } + + return linkerd.TryInvokeShutdownEndpoint(cmd.Context()) + }, + } + + cmd.Flags().StringVar(&pgData, "pg-data", os.Getenv("PGDATA"), "The PGDATA to be created") + cmd.Flags().StringVar(&podName, "pod-name", os.Getenv("POD_NAME"), "The name of this pod, to "+ + "be checked against the cluster state") + cmd.Flags().StringVar(&namespace, "namespace", os.Getenv("NAMESPACE"), "The namespace of "+ + "the cluster and of the Pod in k8s") + cmd.Flags().StringVar(&clusterName, "cluster-name", os.Getenv("CLUSTER_NAME"), "The name of "+ + "the current cluster in k8s, used to download TLS certificates") + cmd.Flags().StringVar(&pgUpgrade, "pg-upgrade", env.GetOrDefault("PG_UPGRADE", "pg_upgrade"), + `The path of "pg_upgrade" executable. Defaults to "pg_upgrade".`) + cmd.Flags().StringArrayVar(&pgUpgradeArgs, "pg-upgrade-args", nil, + `Additional arguments for "pg_upgrade" invocation. `+ + `Use the --pg-upgrade-args flag multiple times to pass multiple arguments.`) + cmd.Flags().StringVar(&initdb, "initdb", env.GetOrDefault("INITDB", "initdb"), + `The path of "initdb" executable. Defaults to "initdb".`) + cmd.Flags().StringArrayVar(&initdbArgs, "initdb-args", nil, + `Additional arguments for "initdb" invocation.`+ + `Use the --initdb-args flag multiple times to pass multiple arguments.`) + + return cmd +} + +type upgradeInfo struct { + pgData string + oldBinDir string + pgUpgrade string + pgUpgradeArgs []string + initdb string + initdbArgs []string +} + +// nolint:gocognit +func (ui upgradeInfo) upgradeSubCommand(ctx context.Context, instance *postgres.Instance) error { + contextLogger := log.FromContext(ctx) + + client, err := management.NewControllerRuntimeClient() + if err != nil { + contextLogger.Error(err, "Error creating Kubernetes client") + return err + } + + clusterObjectKey := ctrl.ObjectKey{Name: instance.GetClusterName(), Namespace: instance.GetNamespaceName()} + if err = management.WaitForGetClusterWithClient(ctx, client, clusterObjectKey); err != nil { + return err + } + + // Download the cluster definition from the API server + var cluster apiv1.Cluster + if err := client.Get(ctx, clusterObjectKey, &cluster); err != nil { + contextLogger.Error(err, "Error while getting cluster") + return err + } + instance.Cluster = &cluster + + if _, err := instancecertificate.NewReconciler(client, instance).RefreshSecrets(ctx, &cluster); err != nil { + return fmt.Errorf("error while downloading secrets: %w", err) + } + + if err := instancestorage.ReconcileWalDirectory(ctx); err != nil { + return fmt.Errorf("error while reconciling the WAL storage: %w", err) + } + + if err := fileutils.EnsureDirectoryExists(postgres.GetSocketDir()); err != nil { + return fmt.Errorf("while creating socket directory: %w", err) + } + + contextLogger.Info("Searching for failed upgrades") + + var failedDirs []string + for _, dir := range []string{specs.PgDataPath, 
specs.PgWalVolumePgWalPath} { + matches, err := filepath.Glob(dir + "*.failed_*") + if err != nil { + return fmt.Errorf("error matching paths: %w", err) + } + failedDirs = append(failedDirs, matches...) + } + if len(failedDirs) > 0 { + return fmt.Errorf("found failed upgrade directories: %v", failedDirs) + } + + contextLogger.Info("Starting the upgrade process") + + newDataDir := fmt.Sprintf("%s-new", specs.PgDataPath) + var newWalDir *string + if cluster.ShouldCreateWalArchiveVolume() { + newWalDir = ptr.To(fmt.Sprintf("%s-new", specs.PgWalVolumePgWalPath)) + } + + contextLogger.Info("Ensuring the new data directory does not exist", "directory", newDataDir) + + if err := os.RemoveAll(newDataDir); err != nil { + return fmt.Errorf("failed to remove the directory: %w", err) + } + + if newWalDir != nil { + contextLogger.Info("Ensuring the new pg_wal directory does not exist", "directory", *newWalDir) + if err := os.RemoveAll(*newWalDir); err != nil { + return fmt.Errorf("failed to remove the directory: %w", err) + } + } + + // Extract controldata information from the old data directory + controlData, err := getControlData(ui.oldBinDir, ui.pgData) + if err != nil { + return fmt.Errorf("error while getting old data directory control data: %w", err) + } + + targetVersion, err := cluster.GetPostgresqlMajorVersion() + if err != nil { + return fmt.Errorf("error while getting the target version from the cluster object: %w", err) + } + + contextLogger.Info("Creating data directory", "directory", newDataDir) + if err := runInitDB(newDataDir, newWalDir, controlData, targetVersion, ui.initdb, ui.initdbArgs); err != nil { + return fmt.Errorf("error while creating the data directory: %w", err) + } + + contextLogger.Info("Preparing configuration files", "directory", newDataDir) + if err := prepareConfigurationFiles(ctx, cluster, newDataDir); err != nil { + return err + } + + contextLogger.Info("Checking if we have anything to update") + // Read pg_version from both the old and new data directories + oldVersion, err := postgresutils.GetMajorVersionFromPgData(ui.pgData) + if err != nil { + return fmt.Errorf("error while reading the old version: %w", err) + } + + newVersion, err := postgresutils.GetMajorVersionFromPgData(newDataDir) + if err != nil { + return fmt.Errorf("error while reading the new version: %w", err) + } + + if oldVersion == newVersion { + contextLogger.Info("Versions are the same, no need to upgrade") + if err := os.RemoveAll(newDataDir); err != nil { + return fmt.Errorf("failed to remove the directory: %w", err) + } + return nil + } + + // We need to make sure that the permissions are the right ones + // in some systems they may be messed up even if we fix them before + _ = fileutils.EnsurePgDataPerms(ui.pgData) + _ = fileutils.EnsurePgDataPerms(newDataDir) + + contextLogger.Info("Running pg_upgrade") + + if err := ui.runPgUpgrade(newDataDir); err != nil { + // TODO: in case of failures we should dump the content of the pg_upgrade logs + return fmt.Errorf("error while running pg_upgrade: %w", err) + } + + err = moveDataInPlace(ctx, ui.pgData, oldVersion, newDataDir, newWalDir) + if err != nil { + contextLogger.Error(err, + "Error while moving the data in place, saving the new data directory to avoid data loss") + + suffixTimestamp := fileutils.FormatFriendlyTimestamp(time.Now()) + + dirToBeSaved := []string{ + newDataDir, + ui.pgData + ".old", + } + if newWalDir != nil { + dirToBeSaved = append(dirToBeSaved, + *newWalDir, + specs.PgWalVolumePgWalPath+".old", + ) + } + + for _, dir := range 
dirToBeSaved { + failedPgDataName := fmt.Sprintf("%s.failed_%s", dir, suffixTimestamp) + if errInner := moveDirIfExists(ctx, dir, failedPgDataName); errInner != nil { + contextLogger.Error(errInner, "Error while saving a directory after a failure", "dir", dir) + } + } + + return err + } + + contextLogger.Info("Upgrade completed successfully") + + return nil +} + +func getControlData(binDir, pgData string) (map[string]string, error) { + pgControlDataCmd := exec.Command(path.Join(binDir, "pg_controldata")) // #nosec + pgControlDataCmd.Env = append(os.Environ(), "PGDATA="+pgData) + + out, err := pgControlDataCmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("while executing pg_controldata: %w", err) + } + + return utils.ParsePgControldataOutput(string(out)), nil +} + +func runInitDB( + destDir string, + walDir *string, + pgControlData map[string]string, + targetMajorVersion int, + initdb string, + initdbArgs []string, +) error { + // Invoke initdb to generate a data directory + options := []string{ + "--username", + "postgres", + "-D", + destDir, + } + + if walDir != nil { + options = append(options, "--waldir", *walDir) + } + + // Extract the WAL segment size from the pg_controldata output + options, err := tryAddWalSegmentSize(pgControlData, options) + if err != nil { + return err + } + + options, err = tryAddDataChecksums(pgControlData, targetMajorVersion, options) + if err != nil { + return err + } + + options = append(options, initdbArgs...) + + // Certain CSI drivers may add setgid permissions on newly created folders. + // A default umask is set to attempt to avoid this, by revoking group/other + // permission bits on the PGDATA + _ = compatibility.Umask(0o077) + + initdbCmd := exec.Command(initdb, options...) // #nosec + if err := execlog.RunStreaming(initdbCmd, initdb); err != nil { + return err + } + + return nil +} + +func tryAddDataChecksums( + pgControlData utils.PgControlData, + targetMajorVersion int, + options []string, +) ([]string, error) { + dataPageChecksumVersion, err := pgControlData.GetDataPageChecksumVersion() + if err != nil { + return nil, err + } + + if dataPageChecksumVersion != "1" { + // In postgres 18 we will have to set "--no-data-checksums" if checksums are disabled (they are enabled by default) + if targetMajorVersion >= 18 { + return append(options, "--no-data-checksums"), nil + } + return options, nil + } + + return append(options, "--data-checksums"), nil +} + +func tryAddWalSegmentSize(pgControlData utils.PgControlData, options []string) ([]string, error) { + walSegmentSize, err := pgControlData.GetBytesPerWALSegment() + if err != nil { + return nil, fmt.Errorf("error while reading the WAL segment size: %w", err) + } + + param := "--wal-segsize=" + strconv.Itoa(walSegmentSize/(1024*1024)) + return append(options, param), nil +} + +func prepareConfigurationFiles(ctx context.Context, cluster apiv1.Cluster, destDir string) error { + // Always read the custom and override configuration files created by the operator + _, err := configfile.EnsureIncludes(path.Join(destDir, "postgresql.conf"), + constants.PostgresqlCustomConfigurationFile, + constants.PostgresqlOverrideConfigurationFile, + ) + if err != nil { + return fmt.Errorf("appending inclusion directives to postgresql.conf file resulted in an error: %w", err) + } + + // Set `max_slot_wal_keep_size` to the default value because any other value causes an error + // during pg_upgrade in PostgreSQL 17 before 17.6. 
The bug has been fixed with the commit + // https://github.com/postgres/postgres/commit/f36e5774 + tmpCluster := cluster.DeepCopy() + tmpCluster.Spec.PostgresConfiguration.Parameters["max_slot_wal_keep_size"] = "-1" + + pgMajorVersion, err := postgresutils.GetMajorVersionFromPgData(destDir) + if err != nil { + return fmt.Errorf("error while reading the new data directory version: %w", err) + } + if pgMajorVersion >= 18 { + tmpCluster.Spec.PostgresConfiguration.Parameters["idle_replication_slot_timeout"] = "0" + } + + enabledPluginNamesSet := stringset.From(cluster.GetJobEnabledPluginNames()) + pluginCli, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) + if err != nil { + return fmt.Errorf("error while creating the plugin client: %w", err) + } + defer pluginCli.Close(ctx) + + ctx = pluginClient.SetPluginClientInContext(ctx, pluginCli) + ctx = cluster.SetInContext(ctx) + + newInstance := postgres.Instance{PgData: destDir} + if _, err := newInstance.RefreshConfigurationFilesFromCluster( + ctx, + tmpCluster, + false, + cnpgiPostgres.OperationType_TYPE_UPGRADE, + ); err != nil { + return fmt.Errorf("error while creating the configuration files for new datadir %q: %w", destDir, err) + } + + if _, err := newInstance.RefreshPGIdent(ctx, nil); err != nil { + return fmt.Errorf("error while creating the pg_ident.conf file for new datadir %q: %w", destDir, err) + } + + // Create a stub for the configuration file + // to be filled during the real startup of this instance + err = fileutils.CreateEmptyFile(path.Join(destDir, constants.PostgresqlOverrideConfigurationFile)) + if err != nil { + return fmt.Errorf("creating the operator managed configuration file '%v' resulted in an error: %w", + constants.PostgresqlOverrideConfigurationFile, err) + } + + return nil +} + +func (ui upgradeInfo) runPgUpgrade( + newDataDir string, +) error { + args := []string{ + "--link", + "--username", "postgres", + "--old-bindir", ui.oldBinDir, + "--old-datadir", ui.pgData, + "--new-datadir", newDataDir, + } + args = append(args, ui.pgUpgradeArgs...) + + // Run the pg_upgrade command + cmd := exec.Command(ui.pgUpgrade, args...) 
// #nosec + cmd.Dir = newDataDir + if err := execlog.RunStreaming(cmd, path.Base(ui.pgUpgrade)); err != nil { + return fmt.Errorf("error while running %q: %w", cmd, err) + } + + return nil +} + +func moveDataInPlace( + ctx context.Context, + pgData string, + oldMajor int, + newDataDir string, + newWalDir *string, +) error { + contextLogger := log.FromContext(ctx) + + contextLogger.Info("Cleaning up the new data directory") + if err := os.RemoveAll(path.Join(newDataDir, "delete_old_cluster.sh")); err != nil { + return fmt.Errorf("error while removing the delete_old_cluster.sh script: %w", err) + } + + contextLogger.Info("Moving the old data directory") + if err := os.Rename(pgData, pgData+".old"); err != nil { + return fmt.Errorf("error while moving the old data directory: %w", err) + } + + if newWalDir != nil { + contextLogger.Info("Moving the old pg_wal directory") + if err := os.Rename(specs.PgWalVolumePgWalPath, specs.PgWalVolumePgWalPath+".old"); err != nil { + return fmt.Errorf("error while moving the old pg_wal directory: %w", err) + } + } + + contextLogger.Info("Moving the new data directory in place") + if err := os.Rename(newDataDir, pgData); err != nil { + return fmt.Errorf("error while moving the new data directory: %w", err) + } + + if newWalDir != nil { + contextLogger.Info("Moving the new pg_wal directory in place") + if err := os.Rename(*newWalDir, specs.PgWalVolumePgWalPath); err != nil { + return fmt.Errorf("error while moving the pg_wal directory content: %w", err) + } + if err := fileutils.RemoveFile(specs.PgWalPath); err != nil { + return fmt.Errorf("error while removing the symlink to pg_wal: %w", err) + } + if err := os.Symlink(specs.PgWalVolumePgWalPath, specs.PgWalPath); err != nil { + return fmt.Errorf("error while creating the symlink to pg_wal: %w", err) + } + } + + contextLogger.Info("Removing the old data directory and pg_wal directory") + if err := os.RemoveAll(pgData + ".old"); err != nil { + return fmt.Errorf("error while removing the old data directory: %w", err) + } + if err := os.RemoveAll(specs.PgWalVolumePgWalPath + ".old"); err != nil { + return fmt.Errorf("error while removing the old pg_wal directory: %w", err) + } + + contextLogger.Info("Cleaning up the previous version directory from tablespaces") + if err := removeMatchingPaths(ctx, + path.Join(pgData, "pg_tblspc", "*", fmt.Sprintf("PG_%v_*", oldMajor))); err != nil { + return fmt.Errorf("error while removing the old tablespaces directories: %w", err) + } + + return nil +} + +func removeMatchingPaths(ctx context.Context, pattern string) error { + contextLogger := log.FromContext(ctx) + contextLogger.Info("Removing matching paths", "pattern", pattern) + + // Find all matching paths + matches, err := filepath.Glob(pattern) + if err != nil { + return fmt.Errorf("error matching paths: %w", err) + } + + // Iterate through the matches and remove each + for _, match := range matches { + contextLogger.Info("Removing path", "path", match) + err := os.RemoveAll(match) + if err != nil { + return fmt.Errorf("failed to remove %s: %w", match, err) + } + } + + return nil +} + +func moveDirIfExists(ctx context.Context, oldPath string, newPath string) error { + contextLogger := log.FromContext(ctx) + if _, errExists := os.Stat(oldPath); !os.IsNotExist(errExists) { + contextLogger.Info("Moving directory", "oldPath", oldPath, "newPath", newPath) + err := os.Rename(oldPath, newPath) + if err != nil { + return err + } + } + + return nil +} diff --git a/internal/cmd/manager/instance/upgrade/prepare/cmd.go 
diff --git a/internal/cmd/manager/instance/upgrade/prepare/cmd.go b/internal/cmd/manager/instance/upgrade/prepare/cmd.go
new file mode 100644
index 0000000000..b5a102a960
--- /dev/null
+++ b/internal/cmd/manager/instance/upgrade/prepare/cmd.go
@@ -0,0 +1,126 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package prepare implements the "instance upgrade prepare" subcommand
+package prepare
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+
+	"github.com/cloudnative-pg/machinery/pkg/env"
+	"github.com/cloudnative-pg/machinery/pkg/fileutils"
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"github.com/cloudnative-pg/machinery/pkg/postgres/pgconfig"
+	"github.com/spf13/cobra"
+)
+
+// NewCmd creates the cobra command
+func NewCmd() *cobra.Command {
+	var pgConfig string
+
+	cmd := cobra.Command{
+		Use:  "prepare [target]",
+		Args: cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			contextLogger := log.FromContext(cmd.Context())
+			dest := args[0]
+
+			if err := copyPostgresInstallation(cmd.Context(), pgConfig, dest); err != nil {
+				contextLogger.Error(err, "Failed to copy the PostgreSQL installation")
+				return err
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Flags().StringVar(&pgConfig, "pg-config", env.GetOrDefault("PG_CONFIG", "pg_config"),
+		`The path of "pg_config" executable. Defaults to "pg_config".`)
+
+	return &cmd
+}
+
+// copyPostgresInstallation replicates the PostgreSQL installation to the specified destination directory
+// for use by the pg_upgrade command as the old binary directory.
+//
+// Steps performed:
+// 1. Removes the existing destination directory if it exists.
+// 2. Retrieves the PostgreSQL binary, library, and shared directories using pg_config.
+// 3. Creates the corresponding directories in the destination path.
+// 4. Copies the contents of the PostgreSQL directories to the destination.
+// 5. Creates a bindir.txt file in the destination directory with the path to the binary directory.
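The five numbered steps above can be condensed into a standalone sketch. This version shells out to pg_config directly instead of using the pgconfig helper package that the real implementation relies on, and the destination directory is a hypothetical example:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path"
	"strings"
)

// pgConfigValue asks pg_config for one directory, e.g. "--bindir".
func pgConfigValue(pgConfig, flag string) (string, error) {
	out, err := exec.Command(pgConfig, flag).Output()
	if err != nil {
		return "", fmt.Errorf("running %s %s: %w", pgConfig, flag, err)
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	dest := "/controller/old" // hypothetical destination directory

	// Step 2: resolve the directory to copy via pg_config.
	binDir, err := pgConfigValue("pg_config", "--bindir")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Step 3: mirror the source path under the destination.
	destBinDir := path.Join(dest, binDir)
	if err := os.MkdirAll(destBinDir, 0o750); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Step 4: "cp -a" preserves symbolic links, which os.CopyFS does not.
	if err := exec.Command("cp", "-a", binDir+"/.", destBinDir).Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Step 5: record where pg_upgrade will later find the old binaries.
	if err := os.WriteFile(path.Join(dest, "bindir.txt"), []byte(destBinDir+"\n"), 0o600); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("old binaries copied to", destBinDir)
}
```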
+func copyPostgresInstallation(ctx context.Context, pgConfig string, dest string) error {
+	contextLogger := log.FromContext(ctx)
+
+	dest = path.Clean(dest)
+
+	contextLogger.Info("Copying the PostgreSQL installation to the destination", "destination", dest)
+
+	contextLogger.Info("Removing the destination directory", "directory", dest)
+	if err := os.RemoveAll(dest); err != nil {
+		return fmt.Errorf("failed to remove the directory: %w", err)
+	}
+
+	contextLogger.Info("Creating the destination directory", "directory", dest)
+	if err := os.MkdirAll(dest, 0o750); err != nil {
+		return fmt.Errorf("failed to create the directory: %w", err)
+	}
+
+	copyLocations := []pgconfig.ConfigurationParameter{pgconfig.BinDir, pgconfig.PkgLibDir, pgconfig.ShareDir}
+	for _, config := range copyLocations {
+		sourceDir, err := pgconfig.GetConfigurationParameter(pgConfig, config)
+		if err != nil {
+			return err
+		}
+		sourceDir = path.Clean(sourceDir)
+		destDir := path.Clean(path.Join(dest, sourceDir))
+
+		if config == pgconfig.BinDir {
+			destFile := path.Join(dest, "bindir.txt")
+			contextLogger.Info("Creating the bindir.txt file", "file", destFile)
+			if _, err := fileutils.WriteStringToFile(destFile, fmt.Sprintf("%s\n", destDir)); err != nil {
+				return fmt.Errorf("failed to write the %q file: %w", destFile, err)
+			}
+		}
+
+		contextLogger.Info("Creating the directory", "directory", destDir)
+		if err := os.MkdirAll(destDir, 0o750); err != nil {
+			return fmt.Errorf("failed to create the directory: %w", err)
+		}
+
+		contextLogger.Info("Copying the files", "source", sourceDir, "destination", destDir)
+
+		// We use "cp" instead of os.CopyFS because the latter doesn't
+		// support symbolic links as of Go 1.24 and we don't want to
+		// include any other dependencies in the project nor
+		// reinvent the wheel.
+		//
+		// This should be re-evaluated in the future and the
+		// requirement to have "cp" in the image should be removed.
+		if err := exec.Command("cp", "-a", sourceDir+"/.", destDir).Run(); err != nil { //nolint:gosec
+			return fmt.Errorf("failed to copy the files: %w", err)
+		}
+	}
+
+	return nil
+}
diff --git a/internal/cmd/manager/pgbouncer/cmd.go b/internal/cmd/manager/pgbouncer/cmd.go
index 138963dcfb..93dac35669 100644
--- a/internal/cmd/manager/pgbouncer/cmd.go
+++ b/internal/cmd/manager/pgbouncer/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ // Package pgbouncer implements the "pgbouncer" subcommand of the operator @@ -19,10 +22,12 @@ package pgbouncer import ( "fmt" + "os" "github.com/spf13/cobra" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/pgbouncer/run" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) // NewCmd creates the "pgbouncer" command @@ -31,6 +36,9 @@ func NewCmd() *cobra.Command { Use: "pgbouncer", Short: "pgbouncer management subfeatures", SilenceErrors: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + return os.MkdirAll(postgres.TemporaryDirectory, 0o1777) //nolint:gosec + }, RunE: func(_ *cobra.Command, _ []string) error { return fmt.Errorf("missing subcommand") }, diff --git a/internal/cmd/manager/pgbouncer/run/cmd.go b/internal/cmd/manager/pgbouncer/run/cmd.go index 3c283478a7..cd3a77cf69 100644 --- a/internal/cmd/manager/pgbouncer/run/cmd.go +++ b/internal/cmd/manager/pgbouncer/run/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package run implements the "pgbouncer run" subcommand of the operator @@ -54,9 +57,10 @@ func NewCmd() *cobra.Command { cmd := &cobra.Command{ Use: "run", SilenceErrors: true, - PreRunE: func(_ *cobra.Command, _ []string) error { + PreRunE: func(cmd *cobra.Command, _ []string) error { + contextLogger := log.FromContext(cmd.Context()) if poolerNamespacedName.Name == "" || poolerNamespacedName.Namespace == "" { - log.Info( + contextLogger.Info( "pooler object key not set", "poolerNamespacedName", poolerNamespacedName) return errorMissingPoolerNamespacedName @@ -64,8 +68,14 @@ func NewCmd() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - if err := runSubCommand(cmd.Context(), poolerNamespacedName); err != nil { - log.Error(err, "Error while running manager") + ctx := log.IntoContext( + cmd.Context(), + log.GetLogger().WithValues("logger", "pgbouncer-manager"), + ) + contextLogger := log.FromContext(ctx) + + if err := runSubCommand(ctx, poolerNamespacedName); err != nil { + contextLogger.Error(err, "Error while running manager") return err } return nil @@ -91,11 +101,12 @@ func NewCmd() *cobra.Command { func runSubCommand(ctx context.Context, poolerNamespacedName types.NamespacedName) error { var err error - log.Info("Starting CloudNativePG PgBouncer Instance Manager", + contextLogger := log.FromContext(ctx) + contextLogger.Info("Starting CloudNativePG PgBouncer Instance Manager", "version", versions.Version, "build", versions.Info) - if err = startWebServer(); err != nil { + if err = startWebServer(ctx); err != nil { return fmt.Errorf("while starting the web server: %w", err) } @@ -114,10 +125,10 @@ func runSubCommand(ctx context.Context, poolerNamespacedName types.NamespacedNam pgBouncerIni := filepath.Join(config.ConfigsDir, config.PgBouncerIniFileName) pgBouncerCmd := exec.Command(pgBouncerCommandName, pgBouncerIni) //nolint:gosec stdoutWriter := &execlog.LogWriter{ Logger: log.WithValues(execlog.PipeKey,
execlog.StdOut), + Logger: contextLogger.WithValues(execlog.PipeKey, execlog.StdOut), } stderrWriter := &pgBouncerLogWriter{ - Logger: log.WithValues(execlog.PipeKey, execlog.StdErr), + Logger: contextLogger.WithValues(execlog.PipeKey, execlog.StdErr), } streamingCmd, err := execlog.RunStreamingNoWaitWithWriter( pgBouncerCmd, pgBouncerCommandName, stdoutWriter, stderrWriter) @@ -126,14 +137,14 @@ func runSubCommand(ctx context.Context, poolerNamespacedName types.NamespacedNam } startReconciler(ctx, reconciler) - registerSignalHandler(reconciler, pgBouncerCmd) + registerSignalHandler(ctx, reconciler, pgBouncerCmd) if err = streamingCmd.Wait(); err != nil { var exitError *exec.ExitError if !errors.As(err, &exitError) { - log.Error(err, "Error waiting on pgbouncer process") + contextLogger.Error(err, "Error waiting on pgbouncer process") } else { - log.Error(exitError, "pgbouncer process exited with errors") + contextLogger.Error(exitError, "pgbouncer process exited with errors") } return err } @@ -143,29 +154,30 @@ func runSubCommand(ctx context.Context, poolerNamespacedName types.NamespacedNam // registerSignalHandler handles signals from k8s, notifying postgres as // needed -func registerSignalHandler(reconciler *controller.PgBouncerReconciler, command *exec.Cmd) { +func registerSignalHandler(ctx context.Context, reconciler *controller.PgBouncerReconciler, command *exec.Cmd) { + contextLogger := log.FromContext(ctx) signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) go func() { sig := <-signals - log.Info("Received termination signal", "signal", sig) + contextLogger.Info("Received termination signal", "signal", sig) - log.Info("Shutting down web server") + contextLogger.Info("Shutting down web server") err := metricsserver.Shutdown() if err != nil { - log.Error(err, "Error while shutting down the metrics server") + contextLogger.Error(err, "Error while shutting down the metrics server") } else { - log.Info("Metrics server shut down") + contextLogger.Info("Metrics server shut down") } reconciler.Stop() if command != nil { - log.Info("Shutting down pgbouncer instance") + contextLogger.Info("Shutting down pgbouncer instance") err := command.Process.Signal(syscall.SIGINT) if err != nil { - log.Error(err, "Unable to send SIGINT to pgbouncer instance") + contextLogger.Error(err, "Unable to send SIGINT to pgbouncer instance") } } }() @@ -173,15 +185,16 @@ func registerSignalHandler(reconciler *controller.PgBouncerReconciler, command * // startWebServer starts the web server for handling probes given // a certain PostgreSQL instance -func startWebServer() error { - if err := metricsserver.Setup(); err != nil { +func startWebServer(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + if err := metricsserver.Setup(ctx); err != nil { return err } go func() { err := metricsserver.ListenAndServe() if err != nil { - log.Error(err, "Error while starting the metrics server") + contextLogger.Error(err, "Error while starting the metrics server") } }()
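The recurring change in the pgbouncer manager above is the switch from the package-level logger to a context-scoped one: the entry point attaches a named logger to the context once, and every callee retrieves it from there, so all records share the same values. A minimal sketch of the pattern, using only the machinery log helpers that appear in this diff:

```go
package main

import (
	"context"

	"github.com/cloudnative-pg/machinery/pkg/log"
)

// doWork retrieves the logger carried by the context instead of using
// the package-level logger, so every record inherits the values
// attached upstream (for example "logger": "pgbouncer-manager").
func doWork(ctx context.Context) {
	contextLogger := log.FromContext(ctx)
	contextLogger.Info("doing work")
}

func main() {
	// Attach a named logger to the context once, at the entry point...
	ctx := log.IntoContext(
		context.Background(),
		log.GetLogger().WithValues("logger", "example-manager"),
	)
	// ...and let every callee pick it up from the context.
	doWork(ctx)
}
```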
diff --git a/internal/cmd/manager/pgbouncer/run/log.go b/internal/cmd/manager/pgbouncer/run/log.go index d6022b6302..9df826cbc7 100644 --- a/internal/cmd/manager/pgbouncer/run/log.go +++ b/internal/cmd/manager/pgbouncer/run/log.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package run diff --git a/internal/cmd/manager/pgbouncer/run/log_test.go b/internal/cmd/manager/pgbouncer/run/log_test.go index d1be815db8..a291a68d62 100644 --- a/internal/cmd/manager/pgbouncer/run/log_test.go +++ b/internal/cmd/manager/pgbouncer/run/log_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package run diff --git a/internal/cmd/manager/pgbouncer/run/suite_test.go b/internal/cmd/manager/pgbouncer/run/suite_test.go index bc238656c0..febaf17610 100644 --- a/internal/cmd/manager/pgbouncer/run/suite_test.go +++ b/internal/cmd/manager/pgbouncer/run/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package run diff --git a/internal/cmd/manager/show/cmd.go b/internal/cmd/manager/show/cmd.go index b21af7f97a..1d31f7a40e 100644 --- a/internal/cmd/manager/show/cmd.go +++ b/internal/cmd/manager/show/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package show implements the show command subfeatures diff --git a/internal/cmd/manager/show/walarchivequeue/cmd.go b/internal/cmd/manager/show/walarchivequeue/cmd.go index a26cc7cabc..d1f774bfe7 100644 --- a/internal/cmd/manager/show/walarchivequeue/cmd.go +++ b/internal/cmd/manager/show/walarchivequeue/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package walarchivequeue implements the wal-archive-queue command @@ -32,9 +35,12 @@ func NewCmd() *cobra.Command { cmd := cobra.Command{ Use: "wal-archive-queue", Short: "Lists all .ready wal files in " + specs.PgWalArchiveStatusPath, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + contextLogger := log.FromContext(ctx) + if err := run(); err != nil { - log.Error(err, "Error while extracting the list of .ready files") + contextLogger.Error(err, "Error while extracting the list of .ready files") } return nil diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index 616cecefa0..f38e73ea48 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,37 +13,23 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package walarchive implements the wal-archive command package walarchive import ( - "context" "errors" "fmt" "os" - "path" - "path/filepath" - "time" - barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver" - "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" - "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management" - pgManagement "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" ) // errSwitchoverInProgress is raised when there is a switchover in progress @@ -69,49 +56,28 @@ func NewCmd() *cobra.Command { return err } - typedClient, err := management.NewControllerRuntimeClient() - if err != nil { - contextLog.Error(err, "creating controller-runtine client") - return err - } - - cluster, err := cacheClient.GetCluster() - if err != nil { - return fmt.Errorf("failed to get cluster: %w", err) + localClient := local.NewClient() + cluster, errCluster := localClient.Cache().GetCluster() + if
errCluster != nil { + return fmt.Errorf("failed to get cluster: %w", errCluster) } - err = run(ctx, podName, pgData, cluster, args) - if err != nil { + if err := archiver.Run(ctx, podName, pgData, cluster, args[0]); err != nil { if errors.Is(err, errSwitchoverInProgress) { contextLog.Warning("Refusing to archive WALs until the switchover is completed", "err", err) } else { contextLog.Error(err, logErrorMessage) } - - condition := metav1.Condition{ - Type: string(apiv1.ConditionContinuousArchiving), - Status: metav1.ConditionFalse, - Reason: string(apiv1.ConditionReasonContinuousArchivingFailing), - Message: err.Error(), - } - if errCond := conditions.Patch(ctx, typedClient, cluster, &condition); errCond != nil { - log.Error(errCond, "Error changing wal archiving condition (wal archiving failed)") + if reqErr := localClient.Cluster().SetWALArchiveStatusCondition(ctx, err.Error()); reqErr != nil { + contextLog.Error(reqErr, "while invoking the set wal archive condition endpoint") } return err } - // Update the condition if needed. - condition := metav1.Condition{ - Type: string(apiv1.ConditionContinuousArchiving), - Status: metav1.ConditionTrue, - Reason: string(apiv1.ConditionReasonContinuousArchivingSuccess), - Message: "Continuous archiving is working", + if err := localClient.Cluster().SetWALArchiveStatusCondition(ctx, ""); err != nil { + contextLog.Error(err, "while invoking the set wal archive condition endpoint") } - if errCond := conditions.Patch(ctx, typedClient, cluster, &condition); errCond != nil { - log.Error(errCond, "Error changing wal archiving condition (wal archiving succeeded)") - } - return nil }, } @@ -121,190 +87,3 @@ return &cmd } - -func run( - ctx context.Context, - podName, pgData string, - cluster *apiv1.Cluster, - args []string, -) error { - startTime := time.Now() - contextLog := log.FromContext(ctx) - walName := args[0] - - if cluster.IsReplica() { - if podName != cluster.Status.CurrentPrimary && podName != cluster.Status.TargetPrimary { - contextLog.Debug("WAL archiving on a replica cluster, "+ - "but this node is not the target primary nor the current one.
"+ - "Skipping WAL archiving", - "walName", walName, - "currentPrimary", cluster.Status.CurrentPrimary, - "targetPrimary", cluster.Status.TargetPrimary, - ) - return nil - } - } - - if cluster.Status.CurrentPrimary != podName { - contextLog.Info("Refusing to archive WAL when there is a switchover in progress", - "currentPrimary", cluster.Status.CurrentPrimary, - "targetPrimary", cluster.Status.TargetPrimary, - "podName", podName) - return errSwitchoverInProgress - } - - // Request the plugins to archive this WAL - if err := archiveWALViaPlugins(ctx, cluster, path.Join(pgData, walName)); err != nil { - return err - } - - // Request Barman Cloud to archive this WAL - if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { - // Backup not configured, skipping WAL - contextLog.Info("Backup not configured, skip WAL archiving via Barman Cloud", - "walName", walName, - "currentPrimary", cluster.Status.CurrentPrimary, - "targetPrimary", cluster.Status.TargetPrimary, - ) - return nil - } - - // Get environment from cache - env, err := cacheClient.GetEnv(cache.WALArchiveKey) - if err != nil { - return fmt.Errorf("failed to get envs: %w", err) - } - - maxParallel := 1 - if cluster.Spec.Backup.BarmanObjectStore.Wal != nil { - maxParallel = cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel - } - - // Create the archiver - var walArchiver *barmanArchiver.WALArchiver - if walArchiver, err = barmanArchiver.New( - ctx, - env, - postgres.SpoolDirectory, - pgData, - path.Join(pgData, pgManagement.CheckEmptyWalArchiveFile)); err != nil { - return fmt.Errorf("while creating the archiver: %w", err) - } - - // Step 1: Check if the archive location is safe to perform archiving - if utils.IsEmptyWalArchiveCheckEnabled(&cluster.ObjectMeta) { - if err := checkWalArchive(ctx, cluster, walArchiver, pgData); err != nil { - return err - } - } - - // Step 2: check if this WAL file has not been already archived - var isDeletedFromSpool bool - isDeletedFromSpool, err = walArchiver.DeleteFromSpool(walName) - if err != nil { - return fmt.Errorf("while testing the existence of the WAL file in the spool directory: %w", err) - } - if isDeletedFromSpool { - contextLog.Info("Archived WAL file (parallel)", - "walName", walName, - "currentPrimary", cluster.Status.CurrentPrimary, - "targetPrimary", cluster.Status.TargetPrimary) - return nil - } - - // Step 3: gather the WAL files names to archive - walFilesList := walArchiver.GatherWALFilesToArchive(ctx, walName, maxParallel) - - options, err := walArchiver.BarmanCloudWalArchiveOptions( - ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) - if err != nil { - return err - } - - // Step 5: archive the WAL files in parallel - uploadStartTime := time.Now() - walStatus := walArchiver.ArchiveList(ctx, walFilesList, options) - if len(walStatus) > 1 { - contextLog.Info("Completed archive command (parallel)", - "walsCount", len(walStatus), - "startTime", startTime, - "uploadStartTime", uploadStartTime, - "uploadTotalTime", time.Since(uploadStartTime), - "totalTime", time.Since(startTime)) - } - - // We return only the first error to PostgreSQL, because the first error - // is the one raised by the file that PostgreSQL has requested to archive. - // The other errors are related to WAL files that were pre-archived as - // a performance optimization and are just logged - return walStatus[0].Err -} - -// archiveWALViaPlugins requests every capable plugin to archive the passed -// WAL file, and returns an error if a configured plugin fails to do so. 
-// It will not return an error if there's no plugin capable of WAL archiving -func archiveWALViaPlugins( - ctx context.Context, - cluster *apiv1.Cluster, - walName string, -) error { - contextLogger := log.FromContext(ctx) - - plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { - contextLogger.Error(err, "Error while loading local plugins") - } - defer plugins.Close() - - client, err := pluginClient.WithPlugins(ctx, plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) - if err != nil { - contextLogger.Error(err, "Error while loading required plugins") - return err - } - defer client.Close(ctx) - - return client.ArchiveWAL(ctx, cluster, walName) -} - -// isCheckWalArchiveFlagFilePresent returns true if the file CheckEmptyWalArchiveFile is present in the PGDATA directory -func isCheckWalArchiveFlagFilePresent(ctx context.Context, pgDataDirectory string) bool { - contextLogger := log.FromContext(ctx) - filePath := filepath.Join(pgDataDirectory, pgManagement.CheckEmptyWalArchiveFile) - - exists, err := fileutils.FileExists(filePath) - if err != nil { - contextLogger.Error(err, "error while checking for the existence of the CheckEmptyWalArchiveFile") - } - // If the check empty wal archive file doesn't exist this it's a no-op - if !exists { - contextLogger.Debug("WAL check flag file not found, skipping check") - return false - } - - return exists -} - -func checkWalArchive( - ctx context.Context, - cluster *apiv1.Cluster, - walArchiver *barmanArchiver.WALArchiver, - pgData string, -) error { - checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions( - ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) - if err != nil { - log.Error(err, "while getting barman-cloud-wal-archive options") - return err - } - - if !isCheckWalArchiveFlagFilePresent(ctx, pgData) { - return nil - } - - if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil { - log.Error(err, "while barman-cloud-check-wal-archive") - return err - } - - return nil -} diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index c60c0cf194..2e1043f791 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package walrestore implements the walrestore command @@ -29,14 +32,14 @@ import ( barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/spf13/cobra" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) @@ -69,6 +72,8 @@ func NewCmd() *cobra.Command { SilenceErrors: true, Args: cobra.ExactArgs(2), RunE: func(cobraCmd *cobra.Command, args []string) error { + // TODO: The command is triggered by PG, resulting in the loss of stdout logs. + // TODO: We need to implement a logpipe to prevent this. contextLog := log.WithName("wal-restore") ctx := log.IntoContext(cobraCmd.Context(), contextLog) err := run(ctx, pgData, podName, args) @@ -80,7 +85,7 @@ func NewCmd() *cobra.Command { case errors.Is(err, barmanRestorer.ErrWALNotFound): // Nothing to log here. The failure has already been logged. case errors.Is(err, ErrNoBackupConfigured): - contextLog.Info("tried restoring WALs, but no backup was configured") + contextLog.Debug("tried restoring WALs, but no backup was configured") case errors.Is(err, ErrEndOfWALStreamReached): contextLog.Info( "end-of-wal-stream flag found." + @@ -112,13 +117,28 @@ func run(ctx context.Context, pgData string, podName string, args []string) erro var cluster *apiv1.Cluster var err error + cacheClient := local.NewClient().Cache() cluster, err = cacheClient.GetCluster() if err != nil { return fmt.Errorf("failed to get cluster: %w", err) } - if err := restoreWALViaPlugins(ctx, cluster, walName, path.Join(pgData, destinationPath)); err != nil { - return err + walFound, err := restoreWALViaPlugins(ctx, cluster, walName, path.Join(pgData, destinationPath)) + if err != nil { + // With the current implementation, this happens when both of the following conditions are met: + // + // 1. At least one CNPG-i plugin that implements the WAL service is present. + // 2. No plugin can restore the WAL file because: + // a) The requested WAL could not be found + // b) The plugin failed in the restoration process. + // + // When this happens, `walFound` is false, prompting us to revert to the in-tree barman-cloud support. + contextLog.Trace("could not restore WAL via plugins", "wal", walName, "error", err) + } + if walFound { + // This happens only if a CNPG-i plugin was able to restore + // the requested WAL.
+ return nil } recoverClusterName, recoverEnv, barmanConfiguration, err := GetRecoverConfiguration(cluster, podName) @@ -243,19 +263,22 @@ func restoreWALViaPlugins( cluster *apiv1.Cluster, walName string, destinationPathName string, -) error { +) (bool, error) { contextLogger := log.FromContext(ctx) plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { - contextLogger.Error(err, "Error while loading local plugins") - } defer plugins.Close() - client, err := pluginClient.WithPlugins(ctx, plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + enabledPluginNames := apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) + enabledPluginNames = append( + enabledPluginNames, + apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)..., + ) + enabledPluginNamesSet := stringset.From(enabledPluginNames) + client, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) if err != nil { contextLogger.Error(err, "Error while loading required plugins") - return err + return false, err } defer client.Close(ctx) @@ -330,9 +353,9 @@ func GetRecoverConfiguration( return "", nil, nil, ErrNoBackupConfigured } configuration := externalCluster.BarmanObjectStore - if configuration.EndpointCA != nil && configuration.BarmanCredentials.AWS != nil { + if configuration.EndpointCA != nil && configuration.AWS != nil { env = append(env, fmt.Sprintf("AWS_CA_BUNDLE=%s", postgres.BarmanRestoreEndpointCACertificateLocation)) - } else if configuration.EndpointCA != nil && configuration.BarmanCredentials.Azure != nil { + } else if configuration.EndpointCA != nil && configuration.Azure != nil { env = append(env, fmt.Sprintf("REQUESTS_CA_BUNDLE=%s", postgres.BarmanRestoreEndpointCACertificateLocation)) } return externalCluster.Name, env, externalCluster.BarmanObjectStore, nil @@ -342,9 +365,9 @@ func GetRecoverConfiguration( // back up this cluster if cluster.Spec.Backup != nil && cluster.Spec.Backup.BarmanObjectStore != nil { configuration := cluster.Spec.Backup.BarmanObjectStore - if configuration.EndpointCA != nil && configuration.BarmanCredentials.AWS != nil { + if configuration.EndpointCA != nil && configuration.AWS != nil { env = append(env, fmt.Sprintf("AWS_CA_BUNDLE=%s", postgres.BarmanBackupEndpointCACertificateLocation)) - } else if configuration.EndpointCA != nil && configuration.BarmanCredentials.Azure != nil { + } else if configuration.EndpointCA != nil && configuration.Azure != nil { env = append(env, fmt.Sprintf("REQUESTS_CA_BUNDLE=%s", postgres.BarmanBackupEndpointCACertificateLocation)) } return cluster.Name, env, cluster.Spec.Backup.BarmanObjectStore, nil diff --git a/internal/cmd/manager/walrestore/cmd_test.go b/internal/cmd/manager/walrestore/cmd_test.go index bc776e6b05..24c884d315 100644 --- a/internal/cmd/manager/walrestore/cmd_test.go +++ b/internal/cmd/manager/walrestore/cmd_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package walrestore diff --git a/internal/cmd/manager/walrestore/suite_test.go b/internal/cmd/manager/walrestore/suite_test.go index 97a75d2422..9a424b57af 100644 --- a/internal/cmd/manager/walrestore/suite_test.go +++ b/internal/cmd/manager/walrestore/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package walrestore diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go index 8e770b5e51..f1bd825d21 100644 --- a/internal/cmd/plugin/backup/cmd.go +++ b/internal/cmd/plugin/backup/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package backup @@ -21,8 +24,10 @@ import ( "fmt" "slices" "strconv" + "strings" "time" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" @@ -43,6 +48,8 @@ type backupCommandOptions struct { online *bool immediateCheckpoint *bool waitForArchive *bool + pluginName string + pluginParameters pluginParameters } func (options backupCommandOptions) getOnlineConfiguration() *apiv1.OnlineConfiguration { @@ -58,12 +65,20 @@ func (options backupCommandOptions) getOnlineConfiguration() *apiv1.OnlineConfig // NewCmd creates the new "backup" subcommand func NewCmd() *cobra.Command { - var backupName, backupTarget, backupMethod, online, immediateCheckpoint, waitForArchive string + var backupName, backupTarget, backupMethod, online, immediateCheckpoint, waitForArchive, pluginName string + var pluginParameters pluginParameters + + backupMethods := []string{ + string(apiv1.BackupMethodBarmanObjectStore), + string(apiv1.BackupMethodVolumeSnapshot), + string(apiv1.BackupMethodPlugin), + } backupSubcommand := &cobra.Command{ - Use: "backup [cluster]", - Short: "Request an on-demand backup for a PostgreSQL Cluster", - Args: plugin.RequiresArguments(1), + Use: "backup CLUSTER", + Short: "Request an on-demand backup for a PostgreSQL Cluster", + GroupID: plugin.GroupIDDatabase, + Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, @@ -74,7 +89,7 @@ func NewCmd() *cobra.Command { backupName = fmt.Sprintf( "%s-%s", clusterName, - utils.ToCompactISO8601(time.Now()), + pgTime.ToCompactISO8601(time.Now()), ) } @@ -89,15 +104,29 @@ func 
NewCmd() *cobra.Command { } // Check if the backup method is correct - allowedBackupMethods := []string{ - "", - string(apiv1.BackupMethodBarmanObjectStore), - string(apiv1.BackupMethodVolumeSnapshot), - } + allowedBackupMethods := backupMethods + allowedBackupMethods = append(allowedBackupMethods, "") if !slices.Contains(allowedBackupMethods, backupMethod) { return fmt.Errorf("backup-method: %s is not supported by the backup command", backupMethod) } + if backupMethod == string(apiv1.BackupMethodPlugin) { + if len(pluginName) == 0 { + return fmt.Errorf("plugin-name is required when the backup method is %s", + apiv1.BackupMethodPlugin) + } + } else { + if len(pluginName) > 0 { + return fmt.Errorf("plugin-name is allowed only when the backup method is %s", + apiv1.BackupMethodPlugin) + } + + if len(pluginParameters) > 0 { + return fmt.Errorf("plugin-parameters is allowed only when the backup method is %s", + apiv1.BackupMethodPlugin) + } + } + var cluster apiv1.Cluster // check if the cluster exists err := plugin.Client.Get( @@ -135,6 +164,8 @@ func NewCmd() *cobra.Command { online: parsedOnline, immediateCheckpoint: parsedImmediateCheckpoint, waitForArchive: parsedWaitForArchive, + pluginName: pluginName, + pluginParameters: pluginParameters, }) }, } @@ -144,7 +175,7 @@ "backup-name", "", "The name of the Backup resource that will be created, "+ - "defaults to \"[cluster]-[current_timestamp]\"", + "defaults to \"CLUSTER-CURRENT_TIMESTAMP\"", ) backupSubcommand.Flags().StringVarP( &backupTarget, @@ -159,8 +190,8 @@ "method", "m", "", - "If present, will override the backup method defined in backup resource, "+ - "valid values are volumeSnapshot and barmanObjectStore.", + fmt.Sprintf("If present, will override the backup method defined in backup resource, "+ + "valid values are: %s.", strings.Join(backupMethods, ", ")), ) const optionalAcceptedValues = "Optional. Accepted values: true|false|\"\"." @@ -180,12 +211,23 @@ backupSubcommand.Flags().StringVar(&waitForArchive, "wait-for-archive", "", - "Set the '.spec.onlineConfiguratoin.waitForArchive' field of the "+ + "Set the '.spec.onlineConfiguration.waitForArchive' field of the "+ "Backup resource. If not specified, the value in the "+ "'.spec.backup.volumeSnapshot.onlineConfiguration' field will be used. "+ optionalAcceptedValues, ) + backupSubcommand.Flags().StringVar(&pluginName, "plugin-name", "", + "The name of the plugin that should take the backup. This option "+ + "is allowed only when the backup method is set to 'plugin'", + ) + + backupSubcommand.Flags().VarP(&pluginParameters, "plugin-parameters", "", + "The set of plugin parameters that should be passed to the backup plugin, "+ + "i.e. param-one=value,param-two=value.
This option "+ + "is allowed only when the backup method is set to 'plugin'", + ) + return backupSubcommand } @@ -208,6 +250,13 @@ func createBackup(ctx context.Context, options backupCommandOptions) error { } utils.LabelClusterName(&backup.ObjectMeta, options.clusterName) + if len(options.pluginName) > 0 { + backup.Spec.PluginConfiguration = &apiv1.BackupPluginConfiguration{ + Name: options.pluginName, + Parameters: options.pluginParameters, + } + } + err := plugin.Client.Create(ctx, &backup) if err == nil { fmt.Printf("backup/%v created\n", backup.Name) diff --git a/internal/cmd/plugin/backup/doc.go b/internal/cmd/plugin/backup/doc.go index 5672ee33a1..5e39e93408 100644 --- a/internal/cmd/plugin/backup/doc.go +++ b/internal/cmd/plugin/backup/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package backup implements a command to request an on-demand backup diff --git a/internal/cmd/plugin/backup/parameters.go b/internal/cmd/plugin/backup/parameters.go new file mode 100644 index 0000000000..57bae737b0 --- /dev/null +++ b/internal/cmd/plugin/backup/parameters.go @@ -0,0 +1,56 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package backup + +import ( + "strings" + + "github.com/cloudnative-pg/machinery/pkg/stringset" +) + +// pluginParameters is a set of parameters to be passed +// to the plugin when taking a backup +type pluginParameters map[string]string + +// String implements the pflag.Value interface +func (e pluginParameters) String() string { + return strings.Join(stringset.FromKeys(e).ToList(), ",") +} + +// Type implements the pflag.Value interface +func (e pluginParameters) Type() string { + return "map[string]string" +} + +// Set implements the pflag.Value interface +func (e *pluginParameters) Set(val string) error { + entries := strings.Split(val, ",") + result := make(map[string]string, len(entries)) + for _, entry := range entries { + if len(entry) == 0 { + continue + } + + before, after, _ := strings.Cut(entry, "=") + result[before] = after + } + *e = result + return nil +} diff --git a/internal/cmd/plugin/backup/parameters_test.go b/internal/cmd/plugin/backup/parameters_test.go new file mode 100644 index 0000000000..c83f95985d --- /dev/null +++ b/internal/cmd/plugin/backup/parameters_test.go @@ -0,0 +1,56 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package backup + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("plugin parameters parsing", func() { + DescribeTable( + "plugin parameters and values table", + func(value string, expectedParams pluginParameters) { + var params pluginParameters + Expect(params.Set(value)).ToNot(HaveOccurred()) + Expect(params).To(HaveLen(len(expectedParams))) + for k, v := range expectedParams { + Expect(params).To(HaveKeyWithValue(k, v)) + } + }, + Entry("empty value", "", nil), + Entry("singleton", "a=b", map[string]string{ + "a": "b", + }), + Entry("singleton without value", "a", map[string]string{ + "a": "", + }), + Entry("set", "a=b,c=d", map[string]string{ + "a": "b", + "c": "d", + }), + Entry("set with elements without value", "a=b,c,d=,e=f", map[string]string{ + "a": "b", + "c": "", + "d": "", + "e": "f", + }), + ) +}) diff --git a/internal/cmd/plugin/backup/suite_test.go b/internal/cmd/plugin/backup/suite_test.go new file mode 100644 index 0000000000..2ad9f64173 --- /dev/null +++ b/internal/cmd/plugin/backup/suite_test.go @@ -0,0 +1,33 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package backup + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestBackup(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "CNPG Backup subcommand tests") +} diff --git a/internal/cmd/plugin/certificate/certificate.go b/internal/cmd/plugin/certificate/certificate.go index 18bd0f4072..b4fa7e2dc1 100644 --- a/internal/cmd/plugin/certificate/certificate.go +++ b/internal/cmd/plugin/certificate/certificate.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package certificate implements the kubectl-cnpg certificate command diff --git a/internal/cmd/plugin/certificate/cmd.go b/internal/cmd/plugin/certificate/cmd.go index e1359baf06..e8930de4ef 100644 --- a/internal/cmd/plugin/certificate/cmd.go +++ b/internal/cmd/plugin/certificate/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package plugin @@ -32,9 +35,9 @@ type colorConfiguration string const ( // colorAlways configures the output to always be colorized colorAlways colorConfiguration = "always" - // colorAuto configures the the output to be colorized only when attached to a terminal + // colorAuto configures the output to be colorized only when attached to a terminal colorAuto colorConfiguration = "auto" - // colorNever configures the the output never to be colorized + // colorNever configures the output never to be colorized colorNever colorConfiguration = "never" ) diff --git a/internal/cmd/plugin/color_test.go b/internal/cmd/plugin/color_test.go index 6fabdb6e10..2da07bac4b 100644 --- a/internal/cmd/plugin/color_test.go +++ b/internal/cmd/plugin/color_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/plugin/destroy/cmd.go b/internal/cmd/plugin/destroy/cmd.go index 168e50d548..0068b9bd01 100644 --- a/internal/cmd/plugin/destroy/cmd.go +++ b/internal/cmd/plugin/destroy/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package destroy @@ -29,9 +32,10 @@ import ( // NewCmd create the new "destroy" subcommand func NewCmd() *cobra.Command { destroyCmd := &cobra.Command{ - Use: "destroy [cluster] [node]", - Short: "Destroy the instance named [cluster]-[node] or [node] with the associated PVC", - Args: plugin.RequiresArguments(2), + Use: "destroy CLUSTER INSTANCE", + Short: "Destroy the instance named CLUSTER-INSTANCE with the associated PVC", + GroupID: plugin.GroupIDCluster, + Args: plugin.RequiresArguments(2), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() clusterName := args[0] diff --git a/internal/cmd/plugin/destroy/destroy.go b/internal/cmd/plugin/destroy/destroy.go index 11fbb036db..9f30cadcda 100644 --- a/internal/cmd/plugin/destroy/destroy.go +++ b/internal/cmd/plugin/destroy/destroy.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package destroy implements a command to destroy an instance of a cluster and its associated PVC diff --git a/internal/cmd/plugin/fence/cmd.go b/internal/cmd/plugin/fence/cmd.go index 16eb2b0321..455703d613 100644 --- a/internal/cmd/plugin/fence/cmd.go +++ b/internal/cmd/plugin/fence/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package fence @@ -27,8 +30,8 @@ import ( var ( fenceOnCmd = &cobra.Command{ - Use: "on [cluster] [node]", - Short: `Fence an instance named [cluster]-[node] or [node]`, + Use: "on CLUSTER INSTANCE", + Short: `Fence an instance named CLUSTER-INSTANCE`, Args: plugin.RequiresArguments(2), RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] @@ -42,8 +45,8 @@ var ( } fenceOffCmd = &cobra.Command{ - Use: "off [cluster] [node]", - Short: `Remove fence for an instance named [cluster]-[node] or [node]`, + Use: "off CLUSTER INSTANCE", + Short: `Remove fence for an instance named CLUSTER-INSTANCE`, Args: plugin.RequiresArguments(2), RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] @@ -59,8 +62,9 @@ var ( // NewCmd creates the new "fencing" command func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "fencing", - Short: `Fencing related commands`, + Use: "fencing", + Short: `Fencing related commands`, + GroupID: plugin.GroupIDCluster, } cmd.AddCommand(fenceOnCmd) cmd.AddCommand(fenceOffCmd) diff --git a/internal/cmd/plugin/fence/fence.go b/internal/cmd/plugin/fence/fence.go index 33c4a1165a..880ba52961 100644 --- a/internal/cmd/plugin/fence/fence.go +++ b/internal/cmd/plugin/fence/fence.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package fence implements a command to fence instances in a cluster diff --git a/internal/cmd/plugin/fio/cmd.go b/internal/cmd/plugin/fio/cmd.go index 7c53e97aad..e064e38f47 100644 --- a/internal/cmd/plugin/fio/cmd.go +++ b/internal/cmd/plugin/fio/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package fio @@ -22,6 +25,8 @@ import ( "os" "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd initializes the fio command @@ -35,6 +40,7 @@ func NewCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), Long: `Creates a fio deployment that will execute a fio job on the specified pvc.`, Example: jobExample, + GroupID: plugin.GroupIDMiscellaneous, RunE: func(_ *cobra.Command, args []string) error { ctx := context.Background() fioArgs := args[1:] @@ -61,7 +67,7 @@ func NewCmd() *cobra.Command { fmt.Printf("To remove this test you need to delete the Deployment, ConfigMap "+ "and PVC with the name %v\n\nThe simplest way to do this is to re-run the command that was run "+ "to generate the deployment with the --dry-run flag and pipe that output to kubectl delete, e.g.:\n\n"+ - "kubectl cnpg fio --dry-run | kubectl delete -f -", deploymentName) + "kubectl cnpg fio --dry-run | kubectl delete -f -\n", deploymentName) } }, } diff --git a/internal/cmd/plugin/fio/doc.go b/internal/cmd/plugin/fio/doc.go index 12f8769e35..c00f151dbc 100644 --- a/internal/cmd/plugin/fio/doc.go +++ b/internal/cmd/plugin/fio/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package fio implements the fio job creation via deployment diff --git a/internal/cmd/plugin/fio/fio.go b/internal/cmd/plugin/fio/fio.go index c06141c690..b7460516d9 100644 --- a/internal/cmd/plugin/fio/fio.go +++ b/internal/cmd/plugin/fio/fio.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0 */ // Package fio implements the kubectl-cnpg fio sub-command @@ -29,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) type fioCommand struct { @@ -156,10 +160,44 @@ func (cmd *fioCommand) generateConfigMapObject() *corev1.ConfigMap { return result } +func getSecurityContext() *corev1.SecurityContext { + runAs := int64(10001) + sc := &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + RunAsNonRoot: ptr.To(true), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{ + "ALL", + }, + }, + ReadOnlyRootFilesystem: ptr.To(true), + } + if utils.HaveSecurityContextConstraints() { + return sc + } + + sc.RunAsUser = &runAs + sc.RunAsGroup = &runAs + sc.SeccompProfile = &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + } + + return sc +} + +func getPodSecurityContext() *corev1.PodSecurityContext { + if utils.HaveSecurityContextConstraints() { + return &corev1.PodSecurityContext{} + } + runAs := int64(10001) + return &corev1.PodSecurityContext{ + FSGroup: &runAs, + } +} + // generateFioDeployment generates the deployment spec. func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Deployment { - runAs := int64(10001) - fioDeployment := &appsv1.Deployment{ + return &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ APIVersion: "apps/v1", Kind: "Deployment", @@ -229,22 +267,7 @@ func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Depl InitialDelaySeconds: 60, PeriodSeconds: 10, }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - RunAsGroup: &runAs, - RunAsNonRoot: ptr.To(true), - RunAsUser: &runAs, - - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - "ALL", - }, - }, - ReadOnlyRootFilesystem: ptr.To(true), - }, + SecurityContext: getSecurityContext(), Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ "memory": resource.MustParse("100M"), @@ -303,13 +326,10 @@ func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Depl }, }, }, - NodeSelector: map[string]string{}, - SecurityContext: &corev1.PodSecurityContext{ - FSGroup: &runAs, - }, + NodeSelector: map[string]string{}, + SecurityContext: getPodSecurityContext(), }, }, }, } - return fioDeployment } diff --git a/internal/cmd/plugin/hibernate/cmd.go b/internal/cmd/plugin/hibernate/cmd.go index c13a8028cd..abe1adf243 100644 --- a/internal/cmd/plugin/hibernate/cmd.go +++ b/internal/cmd/plugin/hibernate/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,79 +13,54 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
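The getSecurityContext/getPodSecurityContext split above exists because OpenShift assigns UID/GID ranges and seccomp profiles through SecurityContextConstraints, so hard-coding RunAsUser, RunAsGroup, FSGroup, or a seccomp profile would fight the SCC admission controller. utils.HaveSecurityContextConstraints comes from the operator's utils package; as a rough mental model only (an assumption, not CloudNativePG's actual implementation), such a probe can be built on API discovery:

// Sketch: detecting OpenShift by probing for the SCC API group.
// This approximates what a HaveSecurityContextConstraints-style check
// could do; it is not the operator's actual code.
package probe

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
)

func haveSCC(cfg *rest.Config) (bool, error) {
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return false, err
	}
	groups, err := dc.ServerGroups()
	if err != nil {
		return false, err
	}
	for _, group := range groups.Groups {
		if group.Name == "security.openshift.io" {
			return true, nil
		}
	}
	return false, nil
}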
+ +SPDX-License-Identifier: Apache-2.0 */ package hibernate import ( + "context" "fmt" "github.com/spf13/cobra" + "sigs.k8s.io/controller-runtime/pkg/client" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) var ( hibernateOnCmd = &cobra.Command{ - Use: "on [cluster]", - Short: "Hibernates the cluster named [cluster]", + Use: "on CLUSTER", + Short: "Hibernates the cluster named CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] - force, err := cmd.Flags().GetBool("force") - if err != nil { - return err - } - - hibernateOn, err := newOnCommand(cmd.Context(), clusterName, force) - if err != nil { - return err - } - - return hibernateOn.execute() + return annotateCluster(cmd.Context(), plugin.Client, client.ObjectKey{ + Name: clusterName, + Namespace: plugin.Namespace, + }, utils.HibernationAnnotationValueOn) }, } hibernateOffCmd = &cobra.Command{ - Use: "off [cluster]", - Short: "Bring the cluster named [cluster] back from hibernation", + Use: "off CLUSTER", + Short: "Bring the cluster named CLUSTER back from hibernation", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] - off := newOffCommand(cmd.Context(), clusterName) - return off.execute() - }, - } - - hibernateStatusCmd = &cobra.Command{ - Use: "status [cluster]", - Short: "Prints the hibernation status for the [cluster]", - Args: plugin.RequiresArguments(1), - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp - }, - RunE: func(cmd *cobra.Command, args []string) error { - clusterName := args[0] - rawOutput, err := cmd.Flags().GetString("output") - if err != nil { - return err - } - - outputFormat := plugin.OutputFormat(rawOutput) - switch outputFormat { - case plugin.OutputFormatJSON, plugin.OutputFormatYAML: - return newStatusCommandStructuredOutput(cmd.Context(), clusterName, outputFormat).execute() - case plugin.OutputFormatText: - return newStatusCommandTextOutput(cmd.Context(), clusterName).execute() - default: - return fmt.Errorf("output: %s is not supported by the hibernate CLI", rawOutput) - } + return annotateCluster(cmd.Context(), plugin.Client, client.ObjectKey{ + Name: clusterName, + Namespace: plugin.Namespace, + }, utils.HibernationAnnotationValueOff) }, } ) @@ -92,25 +68,44 @@ var ( // NewCmd initializes the hibernate command func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "hibernate", - Short: `Hibernation related commands`, + Use: "hibernate", + Short: `Hibernation related commands`, + GroupID: plugin.GroupIDCluster, } cmd.AddCommand(hibernateOnCmd) cmd.AddCommand(hibernateOffCmd) - cmd.AddCommand(hibernateStatusCmd) - - hibernateOnCmd.Flags().Bool( - "force", - false, - "Force the hibernation procedure even if the preconditions are not met") - 
hibernateStatusCmd.Flags(). - StringP( - "output", - "o", - "text", - "Output format. One of text, json, or yaml", - ) return cmd } + +func annotateCluster( + ctx context.Context, + cli client.Client, + clusterKey client.ObjectKey, + value utils.HibernationAnnotationValue, +) error { + var cluster apiv1.Cluster + + if err := cli.Get(ctx, clusterKey, &cluster); err != nil { + return fmt.Errorf("failed to get cluster %s: %w", clusterKey.Name, err) + } + + if cluster.Annotations == nil { + cluster.SetAnnotations(make(map[string]string)) + } + + origCluster := cluster.DeepCopy() + + cluster.Annotations[utils.HibernationAnnotationName] = string(value) + + if cluster.Annotations[utils.HibernationAnnotationName] == origCluster.Annotations[utils.HibernationAnnotationName] { + return fmt.Errorf("cluster %s is already in the requested state", clusterKey.Name) + } + + if err := cli.Patch(ctx, &cluster, client.MergeFrom(origCluster)); err != nil { + return fmt.Errorf("failed to patch cluster %s: %w", clusterKey.Name, err) + } + + return nil +} diff --git a/internal/cmd/plugin/hibernate/cmd_test.go b/internal/cmd/plugin/hibernate/cmd_test.go new file mode 100644 index 0000000000..6efbf7c3a3 --- /dev/null +++ b/internal/cmd/plugin/hibernate/cmd_test.go @@ -0,0 +1,116 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package hibernate + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8client "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("annotateCluster", func() { + var ( + cluster *apiv1.Cluster + cli k8client.Client + clusterKey k8client.ObjectKey + ) + + BeforeEach(func() { + cluster = &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + } + cli = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).WithObjects(cluster).Build() + clusterKey = k8client.ObjectKeyFromObject(cluster) + }) + + It("annotates the cluster with hibernation on", func(ctx SpecContext) { + err := annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOn) + Expect(err).ToNot(HaveOccurred()) + + updatedCluster := &apiv1.Cluster{} + err = cli.Get(ctx, clusterKey, updatedCluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Annotations[utils.HibernationAnnotationName]). 
+ To(Equal(string(utils.HibernationAnnotationValueOn))) + }) + + It("annotates the cluster with hibernation off", func(ctx SpecContext) { + err := annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOff) + Expect(err).ToNot(HaveOccurred()) + + updatedCluster := &apiv1.Cluster{} + err = cli.Get(ctx, clusterKey, updatedCluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Annotations[utils.HibernationAnnotationName]). + To(Equal(string(utils.HibernationAnnotationValueOff))) + }) + + It("returns an error if the cluster is already in the requested state", func(ctx SpecContext) { + err := annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOn) + Expect(err).ToNot(HaveOccurred()) + + err = annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOn) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("cluster %s is already in the requested state", clusterKey.Name))) + }) + + It("returns an error if the cluster cannot be retrieved", func(ctx SpecContext) { + nonExistingClusterKey := k8client.ObjectKey{ + Name: "non-existing-cluster", + Namespace: "test-namespace", + } + + err := annotateCluster(ctx, cli, nonExistingClusterKey, utils.HibernationAnnotationValueOn) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("failed to get cluster %s", nonExistingClusterKey.Name))) + }) + + It("toggles hibernation from on to off", func(ctx SpecContext) { + err := annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOn) + Expect(err).ToNot(HaveOccurred()) + + updatedCluster := &apiv1.Cluster{} + err = cli.Get(ctx, clusterKey, updatedCluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Annotations[utils.HibernationAnnotationName]). + To(Equal(string(utils.HibernationAnnotationValueOn))) + + err = annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOff) + Expect(err).ToNot(HaveOccurred()) + + updatedCluster = &apiv1.Cluster{} + err = cli.Get(ctx, clusterKey, updatedCluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Annotations[utils.HibernationAnnotationName]). + To(Equal(string(utils.HibernationAnnotationValueOff))) + }) +}) diff --git a/internal/cmd/plugin/hibernate/doc.go b/internal/cmd/plugin/hibernate/doc.go index a6373346ee..be531991fc 100644 --- a/internal/cmd/plugin/hibernate/doc.go +++ b/internal/cmd/plugin/hibernate/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package hibernate implements the hibernation feature diff --git a/internal/cmd/plugin/hibernate/off.go b/internal/cmd/plugin/hibernate/off.go deleted file mode 100644 index 2a6e9a23c7..0000000000 --- a/internal/cmd/plugin/hibernate/off.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
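The net effect of the rewrite above is that hibernate on/off no longer orchestrates fencing, PVC annotation, and resource deletion from the client side; it merely toggles the declarative hibernation annotation and lets the operator reconcile. The utils constants it references have roughly this shape (the annotation name and values match the documented cnpg.io/hibernation annotation; treat the exact Go declarations in pkg/utils as an approximation):

// Approximate shape of the constants consumed by annotateCluster above.
const HibernationAnnotationName = "cnpg.io/hibernation"

type HibernationAnnotationValue string

const (
	HibernationAnnotationValueOn  HibernationAnnotationValue = "on"
	HibernationAnnotationValueOff HibernationAnnotationValue = "off"
)

In other words, the subcommand is now roughly equivalent to kubectl annotate cluster CLUSTER cnpg.io/hibernation=on --overwrite, plus a guard that errors out when the cluster is already in the requested state.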
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package hibernate - -import ( - "context" - "fmt" - - "github.com/cloudnative-pg/machinery/pkg/log" - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/strings/slices" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// offCommand represent the `hibernate off` command -type offCommand struct { - ctx context.Context - clusterName string -} - -// newOffCommand creates a new `hibernate off` command -func newOffCommand(ctx context.Context, clusterName string) *offCommand { - contextLogger := log.FromContext(ctx).WithValues( - "clusterName", clusterName) - - return &offCommand{ - ctx: log.IntoContext(ctx, contextLogger), - clusterName: clusterName, - } -} - -// execute executes the `hibernate off` command -func (off *offCommand) execute() error { - off.printAdvancement("cluster reactivation starting") - - // Ensuring the cluster doesn't exist - if err := off.ensureClusterDoesNotExistStep(); err != nil { - return err - } - - // Get the list of PVC from which we need to resume this cluster - pvcGroup, err := getHibernatedPVCGroup(off.ctx, off.clusterName) - if err != nil { - return err - } - - // Ensure the list of PVCs we have is correct - if err := off.ensurePVCsArePartOfAPVCGroupStep(pvcGroup); err != nil { - return err - } - - // We recreate the cluster resource from the first PVC of the group, - // and don't care of which PVC we select because we annotate - // each PVC of a group with the same data. 
- pvc := pvcGroup[0] - - // We get the original cluster resource from the annotation - clusterFromPVC, err := getClusterFromPVCAnnotation(pvc) - if err != nil { - return err - } - - // And recreate it into the Kubernetes cluster - if err := off.createClusterWithoutRuntimeDataStep(clusterFromPVC); err != nil { - return err - } - - off.printAdvancement("cluster reactivation completed") - - return nil -} - -// ensureClusterDoesNotExistStep checks if this cluster exist or not, ensuring -// that it is not present -func (off *offCommand) ensureClusterDoesNotExistStep() error { - var cluster apiv1.Cluster - err := plugin.Client.Get( - off.ctx, - types.NamespacedName{Name: off.clusterName, Namespace: plugin.Namespace}, - &cluster, - ) - if err == nil { - return fmt.Errorf("cluster already exist, cannot proceed with reactivation") - } - if !apierrs.IsNotFound(err) { - return err - } - return nil -} - -// ensurePVCsArePartOfAPVCGroupStep check if the passed PVCs are really part of the same group -func (off *offCommand) ensurePVCsArePartOfAPVCGroupStep(pvcs []corev1.PersistentVolumeClaim) error { - // ensure all the pvcs belong to the same node serial and are hibernated - var nodeSerial []string - for _, pvc := range pvcs { - // IMPORTANT: do not use utils.ClusterManifestAnnotationName, utils.PgControlDataAnnotationName here for backwards - // compatibility - if err := ensureAnnotationsExists( - pvc, - utils.HibernateClusterManifestAnnotationName, - utils.HibernatePgControlDataAnnotationName, - utils.ClusterSerialAnnotationName, - ); err != nil { - return err - } - - serial := pvc.Annotations[utils.ClusterSerialAnnotationName] - if !slices.Contains(nodeSerial, serial) { - nodeSerial = append(nodeSerial, serial) - } - } - if len(nodeSerial) != 1 { - return fmt.Errorf("hibernate pvcs belong to different instances of the cluster, cannot proceed") - } - - return nil -} - -// createClusterWithoutRuntimeDataStep recreate the original cluster back into Kubernetes -func (off *offCommand) createClusterWithoutRuntimeDataStep(clusterFromPVC apiv1.Cluster) error { - cluster := clusterFromPVC.DeepCopy() - // remove any runtime kubernetes metadata - cluster.ObjectMeta.ResourceVersion = "" - cluster.ObjectMeta.ManagedFields = nil - cluster.ObjectMeta.UID = "" - cluster.ObjectMeta.Generation = 0 - cluster.ObjectMeta.CreationTimestamp = metav1.Time{} - // remove cluster status - cluster.Status = apiv1.ClusterStatus{} - - // remove any runtime kubernetes annotations - delete(cluster.Annotations, corev1.LastAppliedConfigAnnotation) - - // remove the cluster fencing - delete(cluster.Annotations, utils.FencedInstanceAnnotation) - - // create cluster - return plugin.Client.Create(off.ctx, cluster) -} - -// ensureAnnotationsExists returns an error if the passed PVC is annotated with all the -// passed annotations names -func ensureAnnotationsExists(volume corev1.PersistentVolumeClaim, annotationNames ...string) error { - for _, annotationName := range annotationNames { - if _, ok := volume.Annotations[annotationName]; !ok { - return fmt.Errorf("missing %s annotation, from pvcs: %s", annotationName, volume.Name) - } - } - - return nil -} - -func (off *offCommand) printAdvancement(msg string) { - fmt.Println(msg) -} diff --git a/internal/cmd/plugin/hibernate/on.go b/internal/cmd/plugin/hibernate/on.go deleted file mode 100644 index 65b36e9c88..0000000000 --- a/internal/cmd/plugin/hibernate/on.go +++ /dev/null @@ -1,371 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 
(the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package hibernate - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/cloudnative-pg/machinery/pkg/log" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/retry" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/destroy" - pluginresources "github.com/cloudnative-pg/cloudnative-pg/internal/plugin/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -var hibernationBackoff = wait.Backoff{ - Steps: 4, - Duration: 10 * time.Second, - Factor: 5.0, - Jitter: 0.1, -} - -// onCommand represent the `hibernate on` subcommand -type onCommand struct { - ctx context.Context - cluster *apiv1.Cluster - primaryInstanceSerial int - force bool - shouldRollback bool - - managedInstances []corev1.Pod - primaryInstance corev1.Pod - pvcs []corev1.PersistentVolumeClaim -} - -// newOnCommand creates a new `hibernate on` command -func newOnCommand(ctx context.Context, clusterName string, force bool) (*onCommand, error) { - var cluster apiv1.Cluster - - // Get the Cluster object - err := plugin.Client.Get( - ctx, - client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, - &cluster) - if err != nil { - return nil, fmt.Errorf("could not get cluster: %v", err) - } - - // Get the instances to be hibernated - managedInstances, primaryInstance, err := pluginresources.GetInstancePods(ctx, clusterName) - if err != nil { - return nil, fmt.Errorf("could not get cluster pods: %w", err) - } - if primaryInstance.Name == "" { - return nil, fmt.Errorf("no primary instance found, cannot proceed with the hibernation") - } - - // Get the PVCs that will be hibernated - pvcs, err := persistentvolumeclaim.GetInstancePVCs(ctx, plugin.Client, primaryInstance.Name, plugin.Namespace) - if err != nil { - return nil, fmt.Errorf("cannot get PVCs: %w", err) - } - - // Get the serial ID of the primary instance - primaryInstanceSerial, err := specs.GetNodeSerial(primaryInstance.ObjectMeta) - if err != nil { - return nil, fmt.Errorf("could not get the primary node: %w", err) - } - - contextLogger := log.FromContext(ctx).WithValues( - "clusterName", clusterName, - "primaryInstance", primaryInstance.Name) - - return &onCommand{ - ctx: log.IntoContext(ctx, contextLogger), - cluster: &cluster, - primaryInstanceSerial: primaryInstanceSerial, - managedInstances: managedInstances, - primaryInstance: primaryInstance, - pvcs: pvcs, - force: force, - shouldRollback: false, - }, nil -} - -// execute executes the `hibernate on` command -func (on *onCommand) execute() error { - // Check the hibernation preconditions - if err := on.checkPreconditionsStep(); err != nil 
{ - return err - } - - on.printAdvancement("hibernation process starting...") - - if err := on.fenceClusterStep(); err != nil { - on.shouldRollback = true - return err - } - defer on.rollbackFenceClusterIfNeeded() - - on.printAdvancement("waiting for the cluster to be fenced") - - if err := on.waitInstancesToBeFencedStep(); err != nil { - on.shouldRollback = true - return err - } - - on.printAdvancement("cluster is now fenced, storing primary pg_controldata output") - - if err := on.annotatePVCStep(); err != nil { - on.shouldRollback = true - return err - } - defer on.rollBackAnnotationsIfNeeded() - - on.printAdvancement("PVC annotation complete") - - if err := on.deleteResourcesStep(); err != nil { - on.shouldRollback = true - return err - } - - on.printAdvancement("Hibernation completed") - return nil -} - -// checkPreconditionsStep checks if the preconditions for the execution of this step are -// met or not. If they are not met, it will return an error -func (on *onCommand) checkPreconditionsStep() error { - contextLogger := log.FromContext(on.ctx) - - // We should refuse to hibernate a cluster that was fenced already - fencedInstances, err := utils.GetFencedInstances(on.cluster.Annotations) - if err != nil { - return fmt.Errorf("could not check if cluster is fenced: %v", err) - } - - if fencedInstances.Len() > 0 { - if on.force { - contextLogger.Warning("Continuing hibernation procedure even if there are fenced instances") - } else { - return fmt.Errorf("cannot hibernate a cluster that has fenced instances") - } - } - - return nil -} - -func (on *onCommand) fenceClusterStep() error { - contextLogger := log.FromContext(on.ctx) - - contextLogger.Debug("applying the fencing annotation to the cluster manifest") - if err := utils.NewFencingMetadataExecutor(plugin.Client). - AddFencing(). - ForAllInstances(). - Execute( - on.ctx, - types.NamespacedName{Name: on.cluster.Name, Namespace: plugin.Namespace}, - &apiv1.Cluster{}, - ); err != nil { - return err - } - contextLogger.Debug("fencing annotation set on the cluster manifest") - - return nil -} - -// rollbackFenceClusterIfNeeded removes the fencing status from the -// cluster -func (on *onCommand) rollbackFenceClusterIfNeeded() { - if !on.shouldRollback { - return - } - - contextLogger := log.FromContext(on.ctx) - - fmt.Println("rolling back hibernation: removing the fencing annotation") - if err := utils.NewFencingMetadataExecutor(plugin.Client). - RemoveFencing(). - ForAllInstances(). 
- Execute(on.ctx, - types.NamespacedName{Name: on.cluster.Name, Namespace: plugin.Namespace}, &apiv1.Cluster{}); err != nil { - contextLogger.Error(err, "Rolling back from hibernation failed") - } -} - -// waitInstancesToBeFenced waits for all instances to be shut down -func (on *onCommand) waitInstancesToBeFencedStep() error { - for _, instance := range on.managedInstances { - if err := retry.OnError(hibernationBackoff, resources.RetryAlways, func() error { - running, err := pluginresources.IsInstanceRunning(on.ctx, instance) - if err != nil { - return fmt.Errorf("error checking instance status (%v): %w", instance.Name, err) - } - if running { - return fmt.Errorf("instance still running (%v)", instance.Name) - } - return nil - }); err != nil { - return err - } - } - - return nil -} - -// annotatePVCStep stores the pg_controldata output -// into an annotation of the primary PVC -func (on *onCommand) annotatePVCStep() error { - controlData, err := plugin.GetPGControlData(on.ctx, on.primaryInstance) - if err != nil { - return fmt.Errorf("could not get primary control data: %w", err) - } - on.printAdvancement("primary pg_controldata output fetched") - - on.printAdvancement("annotating the PVC with the cluster manifest") - if err := annotatePVCs(on.ctx, on.pvcs, on.cluster, controlData); err != nil { - return fmt.Errorf("could not annotate PVCs: %w", err) - } - - return nil -} - -func (on *onCommand) rollBackAnnotationsIfNeeded() { - if !on.shouldRollback { - return - } - - fmt.Println("rolling back hibernation: removing pvc annotations") - err := removePVCannotations(on.ctx, on.pvcs) - if err != nil { - fmt.Printf("could not remove PVC annotations: %v", err) - } -} - -func (on *onCommand) deleteResourcesStep() error { - on.printAdvancement("destroying the primary instance while preserving the pvc") - - // from this point there is no going back - if err := destroy.Destroy( - on.ctx, - on.cluster.Name, - fmt.Sprintf("%s-%s", on.cluster.Name, strconv.Itoa(on.primaryInstanceSerial)), - true, - ); err != nil { - return fmt.Errorf("error destroying primary instance: %w", err) - } - on.printAdvancement("primary instance destroy completed") - - on.printAdvancement("deleting the cluster resource") - if err := plugin.Client.Delete(on.ctx, on.cluster); err != nil { - return fmt.Errorf("error while deleting cluster resource: %w", err) - } - on.printAdvancement("cluster resource deletion complete") - - return nil -} - -func (on *onCommand) printAdvancement(msg string) { - fmt.Println(msg) -} - -func annotatePVCs( - ctx context.Context, - pvcs []corev1.PersistentVolumeClaim, - cluster *apiv1.Cluster, - pgControlData string, -) error { - for _, pvc := range pvcs { - if err := retry.OnError(retry.DefaultBackoff, resources.RetryAlways, func() error { - var currentPVC corev1.PersistentVolumeClaim - if err := plugin.Client.Get( - ctx, - types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, - ¤tPVC, - ); err != nil { - return err - } - - if currentPVC.Annotations == nil { - currentPVC.Annotations = map[string]string{} - } - origPVC := currentPVC.DeepCopy() - - // IMPORTANT: do not use utils.ClusterManifestAnnotationName, utils.PgControlDataAnnotationName here for backwards - // compatibility - _, hasHibernateAnnotation := currentPVC.Annotations[utils.HibernateClusterManifestAnnotationName] - _, hasPgControlDataAnnotation := currentPVC.Annotations[utils.HibernatePgControlDataAnnotationName] - if hasHibernateAnnotation || hasPgControlDataAnnotation { - return fmt.Errorf("the PVC already contains 
Hibernation annotations. Erroring out") - } - - bytes, err := json.Marshal(&cluster) - if err != nil { - return err - } - - currentPVC.Annotations[utils.HibernateClusterManifestAnnotationName] = string(bytes) - currentPVC.Annotations[utils.HibernatePgControlDataAnnotationName] = pgControlData - currentPVC.Annotations[utils.ClusterManifestAnnotationName] = string(bytes) - currentPVC.Annotations[utils.PgControldataAnnotationName] = pgControlData - - return plugin.Client.Patch(ctx, ¤tPVC, client.MergeFrom(origPVC)) - }); err != nil { - return err - } - } - - return nil -} - -func removePVCannotations( - ctx context.Context, - pvcs []corev1.PersistentVolumeClaim, -) error { - for _, pvc := range pvcs { - if err := retry.OnError(retry.DefaultBackoff, resources.RetryAlways, func() error { - var currentPVC corev1.PersistentVolumeClaim - if err := plugin.Client.Get( - ctx, - types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, - ¤tPVC, - ); err != nil { - return err - } - - if currentPVC.Annotations == nil { - return nil - } - origPVC := currentPVC.DeepCopy() - - delete(currentPVC.Annotations, utils.HibernateClusterManifestAnnotationName) - delete(currentPVC.Annotations, utils.HibernatePgControlDataAnnotationName) - delete(currentPVC.Annotations, utils.ClusterManifestAnnotationName) - delete(currentPVC.Annotations, utils.PgControldataAnnotationName) - - return plugin.Client.Patch(ctx, ¤tPVC, client.MergeFrom(origPVC)) - }); err != nil { - return err - } - } - - return nil -} diff --git a/internal/cmd/plugin/hibernate/output.go b/internal/cmd/plugin/hibernate/output.go deleted file mode 100644 index d337f5e0ee..0000000000 --- a/internal/cmd/plugin/hibernate/output.go +++ /dev/null @@ -1,210 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package hibernate - -import ( - "bytes" - "context" - "fmt" - "os" - "strings" - "text/tabwriter" - - "github.com/cheynewallace/tabby" - "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/logrusorgru/aurora/v4" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/yaml" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -const ( - // statusClusterManifestNotFound is an error message reported when no cluster manifest is not found - statusClusterManifestNotFound = "Cluster manifest not found" -) - -// statusOutputManager is a type capable of executing a status output request -type statusOutputManager interface { - addHibernationSummaryInformation(level statusLevel, statusMessage, clusterName string) - addClusterManifestInformation(cluster *apiv1.Cluster) - addPVCGroupInformation(pvcs []corev1.PersistentVolumeClaim) - // execute renders the output - execute() error -} - -type textStatusOutputManager struct { - textPrinter *tabby.Tabby - stdoutBuffer *bytes.Buffer -} - -func newTextStatusOutputManager() *textStatusOutputManager { - buffer := &bytes.Buffer{} - return &textStatusOutputManager{ - textPrinter: tabby.NewCustom(tabwriter.NewWriter(buffer, 0, 0, 4, ' ', 0)), - stdoutBuffer: buffer, - } -} - -func (t *textStatusOutputManager) getColor(level statusLevel) aurora.Color { - switch level { - case warningLevel: - return aurora.YellowFg - case errorLevel: - return aurora.RedFg - default: - return aurora.GreenFg - } -} - -func (t *textStatusOutputManager) addHibernationSummaryInformation( - level statusLevel, - statusMessage string, - clusterName string, -) { - t.textPrinter.AddHeader(aurora.Colorize("Hibernation Summary", t.getColor(level))) - t.textPrinter.AddLine("Hibernation status", statusMessage) - t.textPrinter.AddLine("Cluster Name", clusterName) - t.textPrinter.AddLine("Cluster Namespace", plugin.Namespace) - t.textPrinter.AddLine() -} - -func (t *textStatusOutputManager) addClusterManifestInformation( - cluster *apiv1.Cluster, -) { - if cluster == nil { - t.textPrinter.AddHeader(aurora.Red("Cluster Spec Information")) - t.textPrinter.AddLine(aurora.Red(statusClusterManifestNotFound)) - return - } - - t.textPrinter.AddHeader(aurora.Green("Cluster Spec Information")) - bytesArray, err := yaml.Marshal(cluster.Spec) - if err != nil { - const message = "Could not serialize the cluster manifest" - t.textPrinter.AddLine(aurora.Red(message)) - return - } - - t.textPrinter.AddLine(string(bytesArray)) - t.textPrinter.AddLine() -} - -func (t *textStatusOutputManager) addPVCGroupInformation( - pvcs []corev1.PersistentVolumeClaim, -) { - if len(pvcs) == 0 { - return - } - - // there is no need to iterate the pvc group, it is either all valid or none - value, ok := pvcs[0].Annotations[utils.HibernatePgControlDataAnnotationName] - if !ok { - return - } - - t.textPrinter.AddHeader(aurora.Green("PostgreSQL instance control information")) - t.textPrinter.AddLine(value) -} - -func (t *textStatusOutputManager) execute() error { - // do not remove this is to flush the writer cache into the buffer - t.textPrinter.Print() - fmt.Print(t.stdoutBuffer) - return nil -} - -type structuredStatusOutputManager struct { - mapToSerialize map[string]interface{} - format plugin.OutputFormat - ctx context.Context -} - -func newStructuredOutputManager(ctx context.Context, format plugin.OutputFormat) *structuredStatusOutputManager { - return &structuredStatusOutputManager{ - 
mapToSerialize: map[string]interface{}{}, - format: format, - ctx: ctx, - } -} - -func (t *structuredStatusOutputManager) addHibernationSummaryInformation( - level statusLevel, - statusMessage string, - clusterName string, -) { - t.mapToSerialize["summary"] = map[string]string{ - "status": statusMessage, - "clusterName": clusterName, - "namespace": plugin.Namespace, - "level": string(level), - } -} - -func (t *structuredStatusOutputManager) addClusterManifestInformation( - cluster *apiv1.Cluster, -) { - tmpMap := map[string]interface{}{} - defer func() { - t.mapToSerialize["cluster"] = tmpMap - }() - - if cluster == nil { - tmpMap["error"] = statusClusterManifestNotFound - return - } - - tmpMap["spec"] = cluster.Spec -} - -func (t *structuredStatusOutputManager) addPVCGroupInformation( - pvcs []corev1.PersistentVolumeClaim, -) { - contextLogger := log.FromContext(t.ctx) - - // there is no need to iterate the pvc group, it is either all valid or none - value, ok := pvcs[0].Annotations[utils.HibernatePgControlDataAnnotationName] - if !ok { - return - } - - tmp := map[string]string{} - rows := strings.Split(value, "\n") - for _, row := range rows { - // skip empty rows - if strings.Trim(row, " ") == "" { - continue - } - - res := strings.SplitN(row, ":", 2) - if len(res) != 2 { - // bad row parsing, we skip it - contextLogger.Warning("skipping row because it was malformed", "row", row) - tmp["error"] = "one or more rows could not be parsed" - continue - } - tmp[res[0]] = strings.Trim(res[1], " ") - } - - t.mapToSerialize["pgControlData"] = tmp -} - -func (t *structuredStatusOutputManager) execute() error { - return plugin.Print(t.mapToSerialize, t.format, os.Stdout) -} diff --git a/internal/cmd/plugin/hibernate/resources.go b/internal/cmd/plugin/hibernate/resources.go deleted file mode 100644 index f8deae24aa..0000000000 --- a/internal/cmd/plugin/hibernate/resources.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package hibernate - -import ( - "context" - "encoding/json" - "fmt" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// errNoHibernatedPVCsFound indicates that no PVCs were found. 
-var errNoHibernatedPVCsFound = fmt.Errorf("no hibernated PVCs to reactivate found") - -// getHibernatedPVCGroupStep gets the PVC group resulting from the hibernation process -func getHibernatedPVCGroup(ctx context.Context, clusterName string) ([]corev1.PersistentVolumeClaim, error) { - // Get the list of PVCs belonging to this group - var pvcList corev1.PersistentVolumeClaimList - if err := plugin.Client.List( - ctx, - &pvcList, - client.MatchingLabels{utils.ClusterLabelName: clusterName}, - client.InNamespace(plugin.Namespace), - ); err != nil { - return nil, err - } - if len(pvcList.Items) == 0 { - return nil, errNoHibernatedPVCsFound - } - - return pvcList.Items, nil -} - -// getClusterFromPVCAnnotation reads the original cluster resource from the chosen PVC -func getClusterFromPVCAnnotation(pvc corev1.PersistentVolumeClaim) (apiv1.Cluster, error) { - var clusterFromPVC apiv1.Cluster - // get the cluster manifest - clusterJSON := pvc.Annotations[utils.HibernateClusterManifestAnnotationName] - if err := json.Unmarshal([]byte(clusterJSON), &clusterFromPVC); err != nil { - return apiv1.Cluster{}, err - } - return clusterFromPVC, nil -} diff --git a/internal/cmd/plugin/hibernate/status.go b/internal/cmd/plugin/hibernate/status.go deleted file mode 100644 index f425d85959..0000000000 --- a/internal/cmd/plugin/hibernate/status.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package hibernate - -import ( - "context" - "errors" - "fmt" - - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" -) - -// statusLevel describes if the output should communicate an ok,warning or error status -type statusLevel string - -const ( - okLevel statusLevel = "ok" - warningLevel statusLevel = "warning" - errorLevel statusLevel = "error" -) - -type statusCommand struct { - outputManager statusOutputManager - ctx context.Context - clusterName string -} - -func newStatusCommandStructuredOutput( - ctx context.Context, - clusterName string, - format plugin.OutputFormat, -) *statusCommand { - return &statusCommand{ - outputManager: newStructuredOutputManager(ctx, format), - ctx: ctx, - clusterName: clusterName, - } -} - -func newStatusCommandTextOutput(ctx context.Context, clusterName string) *statusCommand { - return &statusCommand{ - outputManager: newTextStatusOutputManager(), - ctx: ctx, - clusterName: clusterName, - } -} - -func (cmd *statusCommand) execute() error { - isDeployed, err := cmd.isClusterDeployed() - if err != nil { - return err - } - if isDeployed { - return cmd.clusterIsAlreadyRunningOutput() - } - - pvcs, err := getHibernatedPVCGroup(cmd.ctx, cmd.clusterName) - if errors.Is(err, errNoHibernatedPVCsFound) { - return cmd.noHibernatedPVCsOutput() - } - if err != nil { - return err - } - - return cmd.clusterHibernatedOutput(pvcs) -} - -func (cmd *statusCommand) clusterHibernatedOutput(pvcs []corev1.PersistentVolumeClaim) error { - clusterFromPVC, err := getClusterFromPVCAnnotation(pvcs[0]) - if err != nil { - return err - } - - cmd.outputManager.addHibernationSummaryInformation(okLevel, "Cluster Hibernated", cmd.clusterName) - cmd.outputManager.addClusterManifestInformation(&clusterFromPVC) - cmd.outputManager.addPVCGroupInformation(pvcs) - - return cmd.outputManager.execute() -} - -func (cmd *statusCommand) clusterIsAlreadyRunningOutput() error { - cmd.outputManager.addHibernationSummaryInformation(warningLevel, "No Hibernation. Cluster Deployed.", cmd.clusterName) - return cmd.outputManager.execute() -} - -func (cmd *statusCommand) noHibernatedPVCsOutput() error { - cmd.outputManager.addHibernationSummaryInformation(errorLevel, "No hibernated PVCs found", cmd.clusterName) - return cmd.outputManager.execute() -} - -func (cmd *statusCommand) isClusterDeployed() (bool, error) { - var cluster apiv1.Cluster - - // Get the Cluster object - err := plugin.Client.Get(cmd.ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: cmd.clusterName}, &cluster) - if apierrs.IsNotFound(err) { - return false, nil - } - if err != nil { - return false, fmt.Errorf("error while fetching the cluster resource: %w", err) - } - - return true, nil -} diff --git a/internal/cmd/plugin/hibernate/suite_test.go b/internal/cmd/plugin/hibernate/suite_test.go new file mode 100644 index 0000000000..491346b9de --- /dev/null +++ b/internal/cmd/plugin/hibernate/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package hibernate_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestHibernate(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Hibernate Suite") +} diff --git a/internal/cmd/plugin/install/cmd.go b/internal/cmd/plugin/install/cmd.go index 33ee9ee9db..fa8d2a3f75 100644 --- a/internal/cmd/plugin/install/cmd.go +++ b/internal/cmd/plugin/install/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,19 +13,24 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package install import ( "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd returns the installation root cmd func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "install", - Short: "CNPG installation commands", + Use: "install", + Short: "CloudNativePG installation-related commands", + GroupID: plugin.GroupIDAdmin, } cmd.AddCommand(newGenerateCmd()) diff --git a/internal/cmd/plugin/install/doc.go b/internal/cmd/plugin/install/doc.go index 00c114b8ca..4124755c60 100644 --- a/internal/cmd/plugin/install/doc.go +++ b/internal/cmd/plugin/install/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package install implements the install plugin command diff --git a/internal/cmd/plugin/install/generate.go b/internal/cmd/plugin/install/generate.go index 243739e89f..ef9e969157 100644 --- a/internal/cmd/plugin/install/generate.go +++ b/internal/cmd/plugin/install/generate.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
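A note on the GroupID values assigned throughout this patch (GroupIDCluster, GroupIDDatabase, GroupIDAdmin, GroupIDMiscellaneous): cobra only honors a GroupID if the root command has registered the group first, which is what turns the help output into titled sections. A minimal sketch of that registration, assuming the constants are exported from the plugin package; the group titles here are invented for illustration:

// Sketch: registering the command groups referenced by this patch.
func registerGroups(root *cobra.Command) {
	root.AddGroup(
		&cobra.Group{ID: plugin.GroupIDCluster, Title: "Cluster commands:"},
		&cobra.Group{ID: plugin.GroupIDDatabase, Title: "Database commands:"},
		&cobra.Group{ID: plugin.GroupIDAdmin, Title: "Administration commands:"},
		&cobra.Group{ID: plugin.GroupIDMiscellaneous, Title: "Miscellaneous commands:"},
	)
}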
+ +SPDX-License-Identifier: Apache-2.0 */ package install diff --git a/internal/cmd/plugin/logical/database.go b/internal/cmd/plugin/logical/database.go index d9f388a2ba..2a416388eb 100644 --- a/internal/cmd/plugin/logical/database.go +++ b/internal/cmd/plugin/logical/database.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logical @@ -40,7 +43,7 @@ func GetApplicationDatabaseName(ctx context.Context, clusterName string) (string &cluster, ) if err != nil { - return "", fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace) + return "", fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, plugin.Namespace, err) } return cluster.GetApplicationDatabaseName(), nil diff --git a/internal/cmd/plugin/logical/doc.go b/internal/cmd/plugin/logical/doc.go index e0b57900fd..be21904a27 100644 --- a/internal/cmd/plugin/logical/doc.go +++ b/internal/cmd/plugin/logical/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package logical contains the common features of the diff --git a/internal/cmd/plugin/logical/externalcluster.go b/internal/cmd/plugin/logical/externalcluster.go index 8a21726743..0304c4a81d 100644 --- a/internal/cmd/plugin/logical/externalcluster.go +++ b/internal/cmd/plugin/logical/externalcluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
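The %w change above (mirrored in externalcluster.go below) is more than cosmetic: wrapping preserves the underlying error, so callers can still branch on its identity with errors.Is or errors.As instead of losing, say, the distinction between a genuine NotFound and an RBAC failure. A self-contained illustration of the difference:

// Why wrapping with %w matters for callers.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func main() {
	plain := fmt.Errorf("cluster foo not found in namespace bar")
	wrapped := fmt.Errorf("cluster foo not found in namespace bar: %w", errNotFound)

	fmt.Println(errors.Is(plain, errNotFound))   // false: the cause is lost
	fmt.Println(errors.Is(wrapped, errNotFound)) // true: callers can still detect it
}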
+ +SPDX-License-Identifier: Apache-2.0 */ package logical @@ -46,7 +49,7 @@ func GetConnectionString( &cluster, ) if err != nil { - return "", fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace) + return "", fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, plugin.Namespace, err) } externalCluster, ok := cluster.ExternalCluster(externalClusterName) diff --git a/internal/cmd/plugin/logical/psql.go b/internal/cmd/plugin/logical/psql.go index 3c4b1c1670..c1c3af41c6 100644 --- a/internal/cmd/plugin/logical/psql.go +++ b/internal/cmd/plugin/logical/psql.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logical @@ -64,6 +67,8 @@ func getSQLCommand( ) (*psql.Command, error) { psqlArgs := []string{ connectionString, + "-U", + "postgres", "-c", sqlCommand, } diff --git a/internal/cmd/plugin/logical/publication/cmd.go b/internal/cmd/plugin/logical/publication/cmd.go index 0409a2b04e..9ce166a5de 100644 --- a/internal/cmd/plugin/logical/publication/cmd.go +++ b/internal/cmd/plugin/logical/publication/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package publication @@ -19,6 +22,7 @@ package publication import ( "github.com/spf13/cobra" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/publication/create" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/publication/drop" ) @@ -26,8 +30,9 @@ import ( // NewCmd initializes the publication command func NewCmd() *cobra.Command { publicationCmd := &cobra.Command{ - Use: "publication", - Short: "Logical publication management commands", + Use: "publication", + Short: "Logical publication management commands", + GroupID: plugin.GroupIDDatabase, } publicationCmd.AddCommand(create.NewCmd()) publicationCmd.AddCommand(drop.NewCmd()) diff --git a/internal/cmd/plugin/logical/publication/create/cmd.go b/internal/cmd/plugin/logical/publication/create/cmd.go index 966500911b..628dc47e1b 100644 --- a/internal/cmd/plugin/logical/publication/create/cmd.go +++ b/internal/cmd/plugin/logical/publication/create/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
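The psql.go change above pins the user the SQL helper connects as. Assuming getSQLCommand hands psqlArgs to psql verbatim, the resulting invocation is equivalent to the sketch below; the connection string and SQL text are placeholders:

// Sketch: what the new psqlArgs amount to on the command line.
package main

import (
	"fmt"
	"strings"
)

func main() {
	connectionString := "host=cluster-example-rw dbname=app" // placeholder
	sqlCommand := `CREATE PUBLICATION "app" FOR ALL TABLES`  // placeholder
	psqlArgs := []string{connectionString, "-U", "postgres", "-c", sqlCommand}
	fmt.Println("psql " + strings.Join(psqlArgs, " "))
}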
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package create @@ -38,7 +41,7 @@ func NewCmd() *cobra.Command { var dryRun bool publicationCreateCmd := &cobra.Command{ - Use: "create cluster_name", + Use: "create CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp diff --git a/internal/cmd/plugin/logical/publication/create/doc.go b/internal/cmd/plugin/logical/publication/create/doc.go index 5717d3abdf..2a38546f4c 100644 --- a/internal/cmd/plugin/logical/publication/create/doc.go +++ b/internal/cmd/plugin/logical/publication/create/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package create contains the implementation of the kubectl cnpg publication create command diff --git a/internal/cmd/plugin/logical/publication/create/publication.go b/internal/cmd/plugin/logical/publication/create/publication.go index 9a6be06362..66f30e69fa 100644 --- a/internal/cmd/plugin/logical/publication/create/publication.go +++ b/internal/cmd/plugin/logical/publication/create/publication.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package create @@ -56,7 +59,7 @@ type PublicationTarget interface { ToPublicationTargetSQL() string } -// PublicationTargetALLTables will publicate all tables +// PublicationTargetALLTables will publish all tables type PublicationTargetALLTables struct{} // ToPublicationTargetSQL implements the PublicationTarget interface @@ -64,7 +67,7 @@ func (PublicationTargetALLTables) ToPublicationTargetSQL() string { return "FOR ALL TABLES" } -// PublicationTargetPublicationObjects publicates multiple publication objects +// PublicationTargetPublicationObjects publishes multiple publication objects type PublicationTargetPublicationObjects struct { PublicationObjects []PublicationObject } @@ -85,15 +88,15 @@ func (objs *PublicationTargetPublicationObjects) ToPublicationTargetSQL() string return result } -// PublicationObject represent an object to publicate +// PublicationObject represents an object to publish type PublicationObject interface { - // Create the SQL statement to publicate this object + // ToPublicationObjectSQL creates the SQL statement to publish this object ToPublicationObjectSQL() string } -// PublicationObjectSchema will publicate all the tables in a certain schema +// PublicationObjectSchema will publish all the tables in a certain schema type PublicationObjectSchema struct { - // The schema to publicate + // The schema to publish SchemaName string } @@ -102,9 +105,9 @@ func (obj PublicationObjectSchema) ToPublicationObjectSQL() string { return fmt.Sprintf("TABLES IN SCHEMA %s", pgx.Identifier{obj.SchemaName}.Sanitize()) } -// PublicationObjectTableExpression will publicate the passed table expression +// PublicationObjectTableExpression will publish the passed table expression type PublicationObjectTableExpression struct { - // The table expression to publicate + // The table expression to publish TableExpressions []string } diff --git a/internal/cmd/plugin/logical/publication/create/publication_test.go b/internal/cmd/plugin/logical/publication/create/publication_test.go index 60081a0b0e..a6f11ee92c 100644 --- a/internal/cmd/plugin/logical/publication/create/publication_test.go +++ b/internal/cmd/plugin/logical/publication/create/publication_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package create @@ -22,14 +25,14 @@ import ( ) var _ = Describe("create publication SQL generator", func() { - It("can publicate all tables", func() { + It("can publish all tables", func() { Expect(PublicationCmdBuilder{ PublicationName: "app", PublicationTarget: PublicationTargetALLTables{}, }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR ALL TABLES`)) }) - It("can publicate all tables with custom parameters", func() { + It("can publish all tables with custom parameters", func() { Expect(PublicationCmdBuilder{ PublicationName: "app", PublicationTarget: PublicationTargetALLTables{}, @@ -37,7 +40,7 @@ var _ = Describe("create publication SQL generator", func() { }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR ALL TABLES WITH (publish='insert')`)) }) - It("can publicate a list of tables via multiple publication objects", func() { + It("can publish a list of tables via multiple publication objects", func() { // This is supported from PG 15 Expect(PublicationCmdBuilder{ PublicationName: "app", @@ -54,7 +57,7 @@ var _ = Describe("create publication SQL generator", func() { }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR TABLE a, TABLE b`)) }) - It("can publicate a list of tables via multiple table expressions", func() { + It("can publish a list of tables via multiple table expressions", func() { // This is supported in PG < 15 Expect(PublicationCmdBuilder{ PublicationName: "app", diff --git a/internal/cmd/plugin/logical/publication/create/suite_test.go b/internal/cmd/plugin/logical/publication/create/suite_test.go index b87263eec2..2bb5d081e4 100644 --- a/internal/cmd/plugin/logical/publication/create/suite_test.go +++ b/internal/cmd/plugin/logical/publication/create/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package create diff --git a/internal/cmd/plugin/logical/publication/doc.go b/internal/cmd/plugin/logical/publication/doc.go index f83e7f3303..d9c8f12919 100644 --- a/internal/cmd/plugin/logical/publication/doc.go +++ b/internal/cmd/plugin/logical/publication/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
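Editor's note, not part of the patch: the generated statements above stay injection-safe because every identifier goes through `pgx.Identifier{...}.Sanitize()`, which double-quotes the name and doubles any embedded quote. A reduced sketch of the same pattern, with `schemaObject` as a stand-in for the patch's `PublicationObjectSchema`:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5"
)

// schemaObject is a cut-down stand-in for PublicationObjectSchema.
type schemaObject struct{ SchemaName string }

// ToPublicationObjectSQL quotes the schema name, so an identifier
// containing spaces or quotes cannot break out of the statement.
func (o schemaObject) ToPublicationObjectSQL() string {
	return fmt.Sprintf("TABLES IN SCHEMA %s", pgx.Identifier{o.SchemaName}.Sanitize())
}

func main() {
	fmt.Println(schemaObject{SchemaName: `odd"schema`}.ToPublicationObjectSQL())
	// Output: TABLES IN SCHEMA "odd""schema"
}
```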
+ +SPDX-License-Identifier: Apache-2.0 */ // Package publication contains the implementation of the kubectl cnpg publication command diff --git a/internal/cmd/plugin/logical/publication/drop/cmd.go b/internal/cmd/plugin/logical/publication/drop/cmd.go index 6d27c7a955..0d1c3b689c 100644 --- a/internal/cmd/plugin/logical/publication/drop/cmd.go +++ b/internal/cmd/plugin/logical/publication/drop/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package drop @@ -35,7 +38,7 @@ func NewCmd() *cobra.Command { var dryRun bool publicationDropCmd := &cobra.Command{ - Use: "drop cluster_name", + Use: "drop CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp diff --git a/internal/cmd/plugin/logical/publication/drop/doc.go b/internal/cmd/plugin/logical/publication/drop/doc.go index 42d91045fb..517d8434f6 100644 --- a/internal/cmd/plugin/logical/publication/drop/doc.go +++ b/internal/cmd/plugin/logical/publication/drop/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package drop contains the implementation of the kubectl cnpg publication drop command diff --git a/internal/cmd/plugin/logical/subscription/cmd.go b/internal/cmd/plugin/logical/subscription/cmd.go index 7c46b96fc7..438091a0c5 100644 --- a/internal/cmd/plugin/logical/subscription/cmd.go +++ b/internal/cmd/plugin/logical/subscription/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package subscription @@ -19,6 +22,7 @@ package subscription import ( "github.com/spf13/cobra" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/create" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/drop" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/syncsequences" @@ -27,8 +31,9 @@ import ( // NewCmd initializes the subscription command func NewCmd() *cobra.Command { subscriptionCmd := &cobra.Command{ - Use: "subscription", - Short: "Logical subscription management commands", + Use: "subscription", + Short: "Logical subscription management commands", + GroupID: plugin.GroupIDDatabase, } subscriptionCmd.AddCommand(create.NewCmd()) subscriptionCmd.AddCommand(drop.NewCmd()) diff --git a/internal/cmd/plugin/logical/subscription/create/cmd.go b/internal/cmd/plugin/logical/subscription/create/cmd.go index 9ca7508f9e..93b43def2f 100644 --- a/internal/cmd/plugin/logical/subscription/create/cmd.go +++ b/internal/cmd/plugin/logical/subscription/create/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package create @@ -37,7 +40,7 @@ func NewCmd() *cobra.Command { var dryRun bool subscriptionCreateCmd := &cobra.Command{ - Use: "create cluster_name", + Use: "create CLUSTER", Short: "create a logical replication subscription", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { diff --git a/internal/cmd/plugin/logical/subscription/create/doc.go b/internal/cmd/plugin/logical/subscription/create/doc.go index a2918f60e6..e5842f21d3 100644 --- a/internal/cmd/plugin/logical/subscription/create/doc.go +++ b/internal/cmd/plugin/logical/subscription/create/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
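Editor's note, not part of the patch: the recurring `GroupID: plugin.GroupIDDatabase` additions only work because Cobra requires the parent command to register each group via `AddGroup`; the help output then lists subcommands under the group's title. A hedged sketch, where the group ID string and title are assumptions rather than the plugin's actual constants:

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "kubectl-cnpg"}
	// The parent must register the group first; Cobra refuses a
	// GroupID that has no matching registered group.
	root.AddGroup(&cobra.Group{ID: "database", Title: "Database commands:"})

	subscription := &cobra.Command{
		Use:     "subscription",
		Short:   "Logical subscription management commands",
		GroupID: "database", // stands in for plugin.GroupIDDatabase
	}
	root.AddCommand(subscription)

	_ = root.Execute() // help output now groups subcommands by title
}
```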
+ +SPDX-License-Identifier: Apache-2.0 */ // Package create contains the implementation of the kubectl cnpg subscription create command diff --git a/internal/cmd/plugin/logical/subscription/create/subscription.go b/internal/cmd/plugin/logical/subscription/create/subscription.go index f545364116..ede10137be 100644 --- a/internal/cmd/plugin/logical/subscription/create/subscription.go +++ b/internal/cmd/plugin/logical/subscription/create/subscription.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package create diff --git a/internal/cmd/plugin/logical/subscription/doc.go b/internal/cmd/plugin/logical/subscription/doc.go index 63b527ec2e..6185f99274 100644 --- a/internal/cmd/plugin/logical/subscription/doc.go +++ b/internal/cmd/plugin/logical/subscription/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package subscription contains the implementation of the kubectl cnpg subscription command diff --git a/internal/cmd/plugin/logical/subscription/drop/cmd.go b/internal/cmd/plugin/logical/subscription/drop/cmd.go index bba02c68bc..7e94df017b 100644 --- a/internal/cmd/plugin/logical/subscription/drop/cmd.go +++ b/internal/cmd/plugin/logical/subscription/drop/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package drop @@ -34,7 +37,7 @@ func NewCmd() *cobra.Command { var dryRun bool subscriptionDropCmd := &cobra.Command{ - Use: "drop cluster_name", + Use: "drop CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp diff --git a/internal/cmd/plugin/logical/subscription/drop/doc.go b/internal/cmd/plugin/logical/subscription/drop/doc.go index a5af8cbcef..340d88f503 100644 --- a/internal/cmd/plugin/logical/subscription/drop/doc.go +++ b/internal/cmd/plugin/logical/subscription/drop/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package drop contains the implementation of the cnpg subscription drop command diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go index a73fe0ff09..2996f96d1e 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package syncsequences @@ -36,7 +39,7 @@ func NewCmd() *cobra.Command { var offset int syncSequencesCmd := &cobra.Command{ - Use: "sync-sequences cluster_name", + Use: "sync-sequences CLUSTER", Short: "synchronize the sequences from the source database", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { @@ -57,7 +60,8 @@ func NewCmd() *cobra.Command { &cluster, ) if err != nil { - return fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace) + return fmt.Errorf("cluster %s not found in namespace %s: %w", + clusterName, plugin.Namespace, err) } if len(dbName) == 0 { diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/doc.go b/internal/cmd/plugin/logical/subscription/syncsequences/doc.go index 4abc977de1..7b3f79b605 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/doc.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package syncsequences contains the implementation of the diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/get.go b/internal/cmd/plugin/logical/subscription/syncsequences/get.go index 070b45b990..460ac424a4 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/get.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/get.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package syncsequences contains the implementation of the diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/update.go b/internal/cmd/plugin/logical/subscription/syncsequences/update.go index da6a7315d0..f706593a50 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/update.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/update.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package syncsequences @@ -44,7 +47,7 @@ func CreateSyncScript(source, destination SequenceMap, offset int) string { } script += fmt.Sprintf( - "SELECT setval(%s, %v);\n", + "SELECT pg_catalog.setval(%s, %v);\n", pq.QuoteLiteral(name), sqlTargetValue) } diff --git a/internal/cmd/plugin/logs/cluster.go b/internal/cmd/plugin/logs/cluster.go index 0249ab9357..788b1916f4 100644 --- a/internal/cmd/plugin/logs/cluster.go +++ b/internal/cmd/plugin/logs/cluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
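Editor's note, not part of the patch: schema-qualifying `setval` as `pg_catalog.setval` pins the call to the system catalog, so the generated script resolves the same function no matter what `search_path` the session happens to use. A reduced sketch of the script builder; the real `CreateSyncScript` also consults the destination map, and `SequenceMap`'s exact shape is assumed here:

```go
package main

import (
	"fmt"

	"github.com/lib/pq"
)

// sequenceMap approximates the plugin's SequenceMap for illustration.
type sequenceMap map[string]int

// createSyncScript emits one setval per sequence, shifted by offset to
// leave headroom for writes that land during the switchover. The
// sequence name is passed as a quoted literal via pq.QuoteLiteral.
func createSyncScript(source sequenceMap, offset int) string {
	script := ""
	for name, value := range source {
		script += fmt.Sprintf("SELECT pg_catalog.setval(%s, %v);\n",
			pq.QuoteLiteral(name), value+offset)
	}
	return script
}

func main() {
	fmt.Print(createSyncScript(sequenceMap{"public.app_id_seq": 42}, 10))
	// SELECT pg_catalog.setval('public.app_id_seq', 52);
}
```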
+ +SPDX-License-Identifier: Apache-2.0 */ package logs @@ -26,7 +29,7 @@ func clusterCmd() *cobra.Command { cl := clusterLogs{} cmd := &cobra.Command{ - Use: "cluster ", + Use: "cluster CLUSTER", Short: "Logs for cluster's pods", Long: "Collects the logs for all pods in a cluster into a single stream or outputFile", Args: plugin.RequiresArguments(1), diff --git a/internal/cmd/plugin/logs/cluster_logs.go b/internal/cmd/plugin/logs/cluster_logs.go index 09d5bd32d6..4c2d7a4978 100644 --- a/internal/cmd/plugin/logs/cluster_logs.go +++ b/internal/cmd/plugin/logs/cluster_logs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs @@ -31,7 +34,7 @@ import ( cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/podlogs" ) // clusterLogs contains the options and context to retrieve cluster logs @@ -55,7 +58,7 @@ func getCluster(cl clusterLogs) (*cnpgv1.Cluster, error) { return &cluster, err } -func getStreamClusterLogs(cluster *cnpgv1.Cluster, cl clusterLogs) logs.ClusterStreamingRequest { +func getStreamClusterLogs(cluster *cnpgv1.Cluster, cl clusterLogs) podlogs.ClusterWriter { var sinceTime *metav1.Time var tail *int64 if cl.timestamp { @@ -64,7 +67,7 @@ func getStreamClusterLogs(cluster *cnpgv1.Cluster, cl clusterLogs) logs.ClusterS if cl.tailLines >= 0 { tail = &cl.tailLines } - return logs.ClusterStreamingRequest{ + return podlogs.ClusterWriter{ Cluster: cluster, Options: &corev1.PodLogOptions{ Timestamps: cl.timestamp, diff --git a/internal/cmd/plugin/logs/cluster_logs_test.go b/internal/cmd/plugin/logs/cluster_logs_test.go index 326ab2a313..25c6be2d05 100644 --- a/internal/cmd/plugin/logs/cluster_logs_test.go +++ b/internal/cmd/plugin/logs/cluster_logs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,17 +13,18 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs import ( - "context" "path" - v1 "k8s.io/api/core/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" - fake2 "k8s.io/client-go/kubernetes/fake" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeClient "k8s.io/client-go/kubernetes/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -34,18 +36,18 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = Describe("Get the logs", func() { +var _ = Describe("Get the logs", Ordered, func() { namespace := "default" clusterName := "test-cluster" - pod := &v1.Pod{ - ObjectMeta: v12.ObjectMeta{ + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName + "-1", }, } - client := fake2.NewSimpleClientset(pod) + client := fakeClient.NewClientset(pod) cluster := &apiv1.Cluster{ - ObjectMeta: v12.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName, Labels: map[string]string{ @@ -54,20 +56,24 @@ var _ = Describe("Get the logs", func() { }, Spec: apiv1.ClusterSpec{}, } - cl := clusterLogs{ - ctx: context.TODO(), - clusterName: clusterName, - namespace: namespace, - follow: true, - timestamp: true, - tailLines: -1, - client: client, - } + var cl clusterLogs plugin.Client = fake.NewClientBuilder(). WithScheme(scheme.BuildWithAllKnownScheme()). WithObjects(cluster). Build() + BeforeEach(func(ctx SpecContext) { + cl = clusterLogs{ + ctx: ctx, + clusterName: clusterName, + namespace: namespace, + follow: true, + timestamp: true, + tailLines: -1, + client: client, + } + }) + It("should get a proper cluster", func() { cluster, err := getCluster(cl) Expect(err).ToNot(HaveOccurred()) @@ -95,18 +101,24 @@ var _ = Describe("Get the logs", func() { }) It("should get the proper stream for logs", func() { + PauseOutputInterception() err := followCluster(cl) + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("should save the logs to file", func() { + tempDir := GinkgoT().TempDir() cl.outputFile = path.Join(tempDir, "test-file.logs") + PauseOutputInterception() err := saveClusterLogs(cl) + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("should fail if can't write a file", func() { - cl.outputFile = "/this-does-not-exist/test-file.log" + tempDir := GinkgoT().TempDir() + cl.outputFile = path.Join(tempDir, "this-does-not-exist/test-file.log") err := saveClusterLogs(cl) Expect(err).To(HaveOccurred()) }) diff --git a/internal/cmd/plugin/logs/cluster_test.go b/internal/cmd/plugin/logs/cluster_test.go index 1df2081fd4..b36f8ce066 100644 --- a/internal/cmd/plugin/logs/cluster_test.go +++ b/internal/cmd/plugin/logs/cluster_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,13 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package logs import ( - v1 "k8s.io/api/core/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakeClient "k8s.io/client-go/kubernetes/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -35,14 +38,14 @@ var _ = Describe("Test the command", func() { clusterName := "test-cluster" namespace := "default" var cluster *apiv1.Cluster - pod := &v1.Pod{ - ObjectMeta: v12.ObjectMeta{ + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName + "-1", }, } cluster = &apiv1.Cluster{ - ObjectMeta: v12.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName, Labels: map[string]string{ @@ -53,7 +56,7 @@ var _ = Describe("Test the command", func() { } plugin.Namespace = namespace - plugin.ClientInterface = fakeClient.NewSimpleClientset(pod) + plugin.ClientInterface = fakeClient.NewClientset(pod) plugin.Client = fake.NewClientBuilder(). WithScheme(scheme.BuildWithAllKnownScheme()). WithObjects(cluster). @@ -62,14 +65,18 @@ var _ = Describe("Test the command", func() { It("should not fail, with cluster name as argument", func() { cmd := clusterCmd() cmd.SetArgs([]string{clusterName}) + PauseOutputInterception() err := cmd.Execute() + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("could follow the logs", func() { cmd := clusterCmd() cmd.SetArgs([]string{clusterName, "-f"}) + PauseOutputInterception() err := cmd.Execute() + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/internal/cmd/plugin/logs/cmd.go b/internal/cmd/plugin/logs/cmd.go index 155cc313bb..94666c66e5 100644 --- a/internal/cmd/plugin/logs/cmd.go +++ b/internal/cmd/plugin/logs/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,22 +13,29 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs import ( "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logs/pretty" ) -// NewCmd creates the new "report" command +// NewCmd creates the new "logs" command func NewCmd() *cobra.Command { logsCmd := &cobra.Command{ - Use: "logs cluster", - Short: "Collect cluster logs", + Use: "logs", + Short: "Logging utilities", + GroupID: plugin.GroupIDTroubleshooting, } logsCmd.AddCommand(clusterCmd()) + logsCmd.AddCommand(pretty.NewCmd()) return logsCmd } diff --git a/internal/cmd/plugin/logs/cmd_test.go b/internal/cmd/plugin/logs/cmd_test.go index 24fbdecd04..1be90c31ff 100644 --- a/internal/cmd/plugin/logs/cmd_test.go +++ b/internal/cmd/plugin/logs/cmd_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
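Editor's note, not part of the patch: the refactored tests above replace suite-level temp-dir management with `GinkgoT().TempDir()`, which hands every spec its own throwaway directory and removes it automatically; that is what lets the suite drop the `BeforeSuite`/`AfterSuite` bookkeeping later in this diff. A minimal sketch of the pattern, assuming it is wired into an existing Ginkgo suite:

```go
package logs_test

import (
	"os"
	"path"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("writing artifacts from a spec", func() {
	It("uses a per-spec temporary directory", func() {
		// TempDir is created for this spec and cleaned up afterwards;
		// no suite-level setup or teardown is required.
		tempDir := GinkgoT().TempDir()
		target := path.Join(tempDir, "test-file.log")
		Expect(os.WriteFile(target, []byte("log line\n"), 0o600)).To(Succeed())
	})
})
```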
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs @@ -24,7 +27,7 @@ import ( var _ = Describe("Get the proper command", func() { It("get the proper command", func() { logsCmd := NewCmd() - Expect(logsCmd.Use).To(BeEquivalentTo("logs cluster")) - Expect(logsCmd.Short).To(BeEquivalentTo("Collect cluster logs")) + Expect(logsCmd.Use).To(BeEquivalentTo("logs")) + Expect(logsCmd.Short).To(BeEquivalentTo("Logging utilities")) }) }) diff --git a/internal/cmd/plugin/logs/doc.go b/internal/cmd/plugin/logs/doc.go index e4fcac3519..682102cecd 100644 --- a/internal/cmd/plugin/logs/doc.go +++ b/internal/cmd/plugin/logs/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package logs implements the kubectl-cnpg logs command diff --git a/internal/cmd/plugin/logs/pretty/doc.go b/internal/cmd/plugin/logs/pretty/doc.go new file mode 100644 index 0000000000..12c350843c --- /dev/null +++ b/internal/cmd/plugin/logs/pretty/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package pretty contains the implementation of `kubectl cnpg logs pretty` +package pretty diff --git a/internal/cmd/plugin/logs/pretty/log_level.go b/internal/cmd/plugin/logs/pretty/log_level.go new file mode 100644 index 0000000000..5f825c066d --- /dev/null +++ b/internal/cmd/plugin/logs/pretty/log_level.go @@ -0,0 +1,86 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package pretty + +import ( + "errors" + + "github.com/cloudnative-pg/machinery/pkg/log" + "go.uber.org/zap/zapcore" +) + +// ErrUnknownLogLevel is returned when an unknown string representation +// of a log level is used +var ErrUnknownLogLevel = errors.New("unknown log level") + +// LogLevel represents a log level such as error, warning, info, debug, or trace. +type LogLevel string + +// Less returns true when the receiver log level is less than +// the passed one +func (l LogLevel) Less(o LogLevel) bool { + return l.toInt() < o.toInt() +} + +// String is the string representation of this level +func (l LogLevel) String() string { + return string(l) +} + +// Type is the data type to be used for this type +// when used as a flag +func (l LogLevel) Type() string { + return "string" +} + +// Set sets a log level given its string representation +func (l *LogLevel) Set(val string) error { + switch val { + case log.ErrorLevelString, log.WarningLevelString, log.InfoLevelString, log.DebugLevelString, log.TraceLevelString: + *l = LogLevel(val) + return nil + + default: + return ErrUnknownLogLevel + } +} + +// toInt returns the corresponding zapcore level +func (l LogLevel) toInt() zapcore.Level { + switch l { + case log.ErrorLevelString: + return log.ErrorLevel + + case log.WarningLevelString: + return log.WarningLevel + + case log.InfoLevelString: + return log.InfoLevel + + case log.DebugLevelString: + return log.DebugLevel + + case log.TraceLevelString: + return log.TraceLevel + + default: + return log.ErrorLevel + } +} diff --git a/internal/cmd/plugin/logs/pretty/log_record.go b/internal/cmd/plugin/logs/pretty/log_record.go new file mode 100644 index 0000000000..edb8bf89a4 --- /dev/null +++ b/internal/cmd/plugin/logs/pretty/log_record.go @@ -0,0 +1,173 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
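Editor's note, not part of the patch: `String`, `Set`, and `Type` are exactly the three methods of `pflag.Value`, which is why a `LogLevel` can be bound straight into a flag set with `Flags().Var`, as `pretty.go` does further down. A trimmed sketch under that assumption, hard-coding the level names instead of using the machinery/log constants:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/spf13/cobra"
)

// logLevel is a cut-down copy of the patch's LogLevel: any type with
// String, Set, and Type satisfies pflag.Value and can back a flag.
type logLevel string

func (l logLevel) String() string { return string(l) }
func (l logLevel) Type() string   { return "string" }

func (l *logLevel) Set(val string) error {
	switch val {
	case "error", "warning", "info", "debug", "trace":
		*l = logLevel(val)
		return nil
	}
	return errors.New("unknown log level")
}

func main() {
	var minLevel logLevel
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(*cobra.Command, []string) error {
			fmt.Println("min level:", minLevel)
			return nil
		},
	}
	// Var accepts any pflag.Value; invalid input fails flag parsing
	// before RunE is ever invoked.
	cmd.Flags().Var(&minLevel, "min-level", "error|warning|info|debug|trace")
	cmd.SetArgs([]string{"--min-level", "debug"})
	_ = cmd.Execute()
}
```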
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package pretty + +import ( + "encoding/json" + "fmt" + "hash/fnv" + "io" + "strings" + + "github.com/logrusorgru/aurora/v4" +) + +// colorizers is a list of functions that can be used to decorate +// pod names +var colorizers = []func(any) aurora.Value{ + aurora.Red, + aurora.Green, + aurora.Magenta, + aurora.Cyan, + aurora.Yellow, +} + +// logRecord is the portion of the structure of a CNPG log +// that is handled by the beautifier +type logRecord struct { + Level LogLevel `json:"level"` + Msg string `json:"msg"` + Logger string `json:"logger"` + TS string `json:"ts"` + LoggingPod string `json:"logging_pod"` + Record struct { + ErrorSeverity string `json:"error_severity"` + Message string `json:"message"` + } `json:"record,omitempty"` + + AdditionalFields map[string]any +} + +func newLogRecordFromBytes(bytes []byte) (*logRecord, error) { + var record logRecord + + if err := json.Unmarshal(bytes, &record); err != nil { + return nil, fmt.Errorf("decoding log record: %w", err) + } + + extraFields := make(map[string]any) + if err := json.Unmarshal(bytes, &extraFields); err != nil { + return nil, fmt.Errorf("decoding extra fields: %w", err) + } + + delete(extraFields, "level") + delete(extraFields, "pipe") + delete(extraFields, "msg") + delete(extraFields, "logger") + delete(extraFields, "ts") + delete(extraFields, "logging_pod") + delete(extraFields, "record") + delete(extraFields, "controllerGroup") + delete(extraFields, "controllerKind") + delete(extraFields, "Cluster") + + record.AdditionalFields = extraFields + return &record, nil +} + +// normalize converts the error_severity into one of the acceptable +// LogLevel values +func (record *logRecord) normalize() { + message := record.Msg + level := string(record.Level) + + if record.Msg == "record" { + switch record.Record.ErrorSeverity { + case "DEBUG1", "DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5": + level = "trace" + + case "INFO", "NOTICE", "LOG": + level = "info" + + case "WARNING": + level = "warning" + + case "ERROR", "FATAL", "PANIC": + level = "error" + + default: + level = "info" + } + + message = record.Record.Message + } + + record.Msg = message + record.Level = LogLevel(level) +} + +// print dumps the formatted record to the specified writer +func (record *logRecord) print(writer io.Writer, verbosity int) error { + const jsonPrefix = " " + const jsonIndent = " " + const maxRowLen = 100 + + message := record.Msg + level := string(record.Level) + + if record.Msg == "record" { + level = record.Record.ErrorSeverity + message = record.Record.Message + } + + additionalFields := "" + if len(record.AdditionalFields) > 0 { + v, _ := json.MarshalIndent(record.AdditionalFields, jsonPrefix, jsonIndent) + additionalFields = string(v) + } + + hasher := fnv.New32a() + _, _ = hasher.Write([]byte(record.LoggingPod)) + colorIdx := int(hasher.Sum32()) % len(colorizers) + + ts := record.TS + if verbosity == 0 && len(ts) > 23 { + ts = record.TS[:23] + } + if verbosity > 0 { + ts = fmt.Sprintf("%-30s", ts) + } + + if verbosity == 0 { + firstLine, suffix, _ := strings.Cut(message, "\n") + if len(firstLine) > maxRowLen || len(suffix) > 0 { + if len(firstLine) > maxRowLen { + firstLine = firstLine[:maxRowLen] + } + firstLine += "..." 
+ } + message = firstLine + } + + _, err := fmt.Fprintln( + writer, + ts, + fmt.Sprintf("%-8s", aurora.Blue(strings.ToUpper(level))), + colorizers[colorIdx](record.LoggingPod), + fmt.Sprintf("%-16s", aurora.Blue(record.Logger)), + message) + if len(additionalFields) > 0 && verbosity > 1 { + _, err = fmt.Fprintln( + writer, + jsonPrefix+additionalFields, + ) + } + return err +} diff --git a/internal/cmd/plugin/logs/pretty/pretty.go b/internal/cmd/plugin/logs/pretty/pretty.go new file mode 100644 index 0000000000..52e4e7384d --- /dev/null +++ b/internal/cmd/plugin/logs/pretty/pretty.go @@ -0,0 +1,255 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package pretty + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "slices" + "sync" + "time" + + "github.com/cloudnative-pg/machinery/pkg/stringset" + "github.com/logrusorgru/aurora/v4" + "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" +) + +type prettyCmd struct { + loggers *stringset.Data + pods *stringset.Data + groupSize int + verbosity int + minLevel LogLevel +} + +// NewCmd creates a new `kubectl cnpg logs pretty` command +func NewCmd() *cobra.Command { + var loggers, pods []string + var sortingGroupSize, verbosity int + bf := prettyCmd{} + + cmd := &cobra.Command{ + Use: "pretty", + Short: "Prettify CNPG logs", + Long: "Reads CNPG logs from standard input and pretty-prints them for human consumption", + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp + }, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + bf.loggers = stringset.From(loggers) + bf.pods = stringset.From(pods) + bf.groupSize = sortingGroupSize + bf.verbosity = verbosity + + recordChannel := make(chan logRecord) + recordGroupsChannel := make(chan []logRecord) + + var wait sync.WaitGroup + + wait.Add(1) + go func() { + bf.decode(cmd.Context(), os.Stdin, recordChannel) + wait.Done() + }() + + wait.Add(1) + go func() { + bf.group(cmd.Context(), recordChannel, recordGroupsChannel) + wait.Done() + }() + + wait.Add(1) + go func() { + bf.write(cmd.Context(), recordGroupsChannel, os.Stdout) + wait.Done() + }() + + wait.Wait() + return nil + }, + } + + cmd.Flags().IntVar(&sortingGroupSize, "sorting-group-size", 1000, + "The maximum size of the window where logs are collected for sorting") + cmd.Flags().StringSliceVar(&loggers, "loggers", nil, + "The list of loggers to receive. Defaults to all.") + cmd.Flags().StringSliceVar(&pods, "pods", nil, + "The list of pods to receive from. Defaults to all.") + cmd.Flags().Var(&bf.minLevel, "min-level", + `Hides the messages whose log level is less important than the specified one. 
+Should be empty or one of error, warning, info, debug, or trace.`) + cmd.Flags().CountVarP(&verbosity, "verbosity", "v", + "The logs verbosity level. More verbose means more information will be printed") + + return cmd +} + +// decode progressively decodes the logs +func (bf *prettyCmd) decode(ctx context.Context, reader io.Reader, recordChannel chan<- logRecord) { + scanner := bufio.NewScanner(reader) + scanner.Buffer(make([]byte, 0, 4096), 1024*1024) + + for scanner.Scan() { + select { + case <-ctx.Done(): + return + default: + } + + record, err := newLogRecordFromBytes(scanner.Bytes()) + if err != nil { + _, _ = fmt.Fprintln( + os.Stderr, + aurora.Red(fmt.Sprintf("JSON syntax error (%s)", err.Error())), + scanner.Text()) + continue + } + + record.normalize() + + if !bf.isRecordRelevant(record) { + continue + } + + recordChannel <- *record + } + + close(recordChannel) +} + +// group transforms a stream of logs into a stream of log groups, so that the groups +// can then be sorted +func (bf *prettyCmd) group(ctx context.Context, logChannel <-chan logRecord, groupChannel chan<- []logRecord) { + bufferArray := make([]logRecord, bf.groupSize) + + buffer := bufferArray[0:0] + + pushLogGroup := func() { + if len(buffer) == 0 { + return + } + + bufferCopy := make([]logRecord, len(buffer)) + copy(bufferCopy, buffer) + groupChannel <- bufferCopy + + buffer = bufferArray[0:0] + } + +logLoop: + for { + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + + select { + case <-ctx.Done(): + break logLoop + + case <-timer.C: + pushLogGroup() + + case logRecord, ok := <-logChannel: + if !ok { + break logLoop + } + + buffer = append(buffer, logRecord) + if len(buffer) == bf.groupSize { + pushLogGroup() + } + } + } + + pushLogGroup() + close(groupChannel) +} + +// write writes the logs on the output +func (bf *prettyCmd) write(ctx context.Context, recordGroupChannel <-chan []logRecord, writer io.Writer) { + logRecordComparison := func(l1, l2 logRecord) int { + if l1.TS < l2.TS { + return -1 + } else if l1.TS > l2.TS { + return 1 + } + + if l1.LoggingPod < l2.LoggingPod { + return -1 + } else if l1.LoggingPod == l2.LoggingPod { + return 0 + } + + return 1 + } + firstGroup := true + +logLoop: + for { + select { + case <-ctx.Done(): + break logLoop + + case logGroupRecord, ok := <-recordGroupChannel: + if !ok { + break logLoop + } + + slices.SortFunc(logGroupRecord, logRecordComparison) + + if !firstGroup { + _, _ = writer.Write([]byte("---\n")) + } + for _, record := range logGroupRecord { + if err := record.print(writer, bf.verbosity); err != nil { + bf.emergencyLog(err, "Dumping a log entry") + } + } + firstGroup = false + } + } +} + +// isRecordRelevant is true when the passed log record is matched +// by the filters set by the user +func (bf *prettyCmd) isRecordRelevant(record *logRecord) bool { + if bf.loggers.Len() > 0 && !bf.loggers.Has(record.Logger) { + return false + } + + if bf.pods.Len() > 0 && !bf.pods.Has(record.LoggingPod) { + return false + } + + if bf.minLevel != "" && record.Level.Less(bf.minLevel) { + return false + } + + return true +} + +func (bf *prettyCmd) emergencyLog(err error, msg string) { + fmt.Println(aurora.Red("ERROR"), err.Error(), msg) +} diff --git a/internal/cmd/plugin/logs/suite_test.go b/internal/cmd/plugin/logs/suite_test.go index c5bd148d2d..7bb114392f 100644 --- a/internal/cmd/plugin/logs/suite_test.go +++ b/internal/cmd/plugin/logs/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, 
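Editor's note, not part of the patch: `decode`, `group`, and `write` above form a classic three-stage channel pipeline. Each stage closes its output channel when its input is exhausted, so the downstream stage drains and exits, and the `WaitGroup` releases once all three are done. A reduced version without the timers and filtering:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
	"sync"
)

func main() {
	input := "line one\nline two\nline three\n"

	records := make(chan string)
	groups := make(chan []string)

	var wait sync.WaitGroup

	wait.Add(1)
	go func() { // decode stage: one record per input line
		defer wait.Done()
		scanner := bufio.NewScanner(strings.NewReader(input))
		for scanner.Scan() {
			records <- scanner.Text()
		}
		close(records) // lets the group stage finish its range loop
	}()

	wait.Add(1)
	go func() { // group stage: fixed-size batches of two
		defer wait.Done()
		buffer := make([]string, 0, 2)
		for r := range records {
			buffer = append(buffer, r)
			if len(buffer) == cap(buffer) {
				groups <- buffer
				buffer = make([]string, 0, 2)
			}
		}
		if len(buffer) > 0 {
			groups <- buffer // flush the partial batch
		}
		close(groups)
	}()

	wait.Add(1)
	go func() { // write stage
		defer wait.Done()
		for g := range groups {
			fmt.Println(g)
		}
	}()

	wait.Wait()
}
```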
established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,32 +13,20 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs import ( - "os" "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -var tempDir string - func TestPgbench(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Logs Suite") } - -var _ = BeforeSuite(func() { - var err error - tempDir, err = os.MkdirTemp(os.TempDir(), "logs_") - Expect(err).ToNot(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - err := os.RemoveAll(tempDir) - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/internal/cmd/plugin/maintenance/cmd.go b/internal/cmd/plugin/maintenance/cmd.go index e0e215fbfc..1d6c4f5b58 100644 --- a/internal/cmd/plugin/maintenance/cmd.go +++ b/internal/cmd/plugin/maintenance/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package maintenance @@ -31,12 +34,13 @@ func NewCmd() *cobra.Command { confirmationRequired bool maintenanceCmd := &cobra.Command{ - Use: "maintenance [set/unset]", - Short: "Sets or removes maintenance mode from clusters", + Use: "maintenance [set/unset]", + Short: "Sets or removes maintenance mode from clusters", + GroupID: plugin.GroupIDCluster, } maintenanceCmd.AddCommand(&cobra.Command{ - Use: "set [cluster]", + Use: "set CLUSTER", Short: "Sets maintenance mode", Long: "This command will set maintenance mode on a single cluster or on all clusters " + "in the current namespace if not specified differently through flags", @@ -57,7 +61,7 @@ func NewCmd() *cobra.Command { }) maintenanceCmd.AddCommand(&cobra.Command{ - Use: "unset [cluster]", + Use: "unset CLUSTER", Short: "Removes maintenance mode", Long: "This command will unset maintenance mode on a single cluster or on all clusters " + "in the current namespace if not specified differently through flags", diff --git a/internal/cmd/plugin/maintenance/maintenance.go b/internal/cmd/plugin/maintenance/maintenance.go index 7060d87375..bc5da6b797 100644 --- a/internal/cmd/plugin/maintenance/maintenance.go +++ b/internal/cmd/plugin/maintenance/maintenance.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package maintenance implements the kubectl-cnpg maintenance sub-command @@ -83,7 +86,7 @@ func Maintenance(ctx context.Context, for _, item := range clusterList.Items { err := patchNodeMaintenanceWindow(ctx, item, setInProgressTo, reusePVC) if err != nil { - return fmt.Errorf("unable to set progress to cluster %v in namespace %v", item.Name, item.Namespace) + return fmt.Errorf("unable to set progress to cluster %v in namespace %v: %w", item.Name, item.Namespace, err) } } diff --git a/internal/cmd/plugin/output.go b/internal/cmd/plugin/output.go index 64f08f21f7..37d9c417da 100644 --- a/internal/cmd/plugin/output.go +++ b/internal/cmd/plugin/output.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/plugin/pgadmin/cmd.go b/internal/cmd/plugin/pgadmin/cmd.go index 920d56203d..f4b85a6601 100644 --- a/internal/cmd/plugin/pgadmin/cmd.go +++ b/internal/cmd/plugin/pgadmin/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgadmin @@ -76,9 +79,10 @@ func NewCmd() *cobra.Command { pgadminCmd := &cobra.Command{ Use: "pgadmin4 [name]", - Short: "Creates a pgadmin deployment", + Short: "Creates a pgAdmin deployment", Args: cobra.MinimumNArgs(1), - Long: `Creates a pgadmin deployment configured to work with a CNPG Cluster.`, + Long: `Creates a pgAdmin deployment configured to work with a CNPG Cluster.`, + GroupID: plugin.GroupIDMiscellaneous, Example: pgadminExample, RunE: func(_ *cobra.Command, args []string) error { ctx := context.Background() diff --git a/internal/cmd/plugin/pgadmin/doc.go b/internal/cmd/plugin/pgadmin/doc.go index 4384798775..84704a7c2f 100644 --- a/internal/cmd/plugin/pgadmin/doc.go +++ b/internal/cmd/plugin/pgadmin/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package pgadmin implements the creation of a pgadmin deployment diff --git a/internal/cmd/plugin/pgadmin/pgadmin.go b/internal/cmd/plugin/pgadmin/pgadmin.go index 44a23f89c4..b5e09d427e 100644 --- a/internal/cmd/plugin/pgadmin/pgadmin.go +++ b/internal/cmd/plugin/pgadmin/pgadmin.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgadmin @@ -26,6 +29,7 @@ import ( "github.com/sethvargo/go-password/password" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" @@ -273,6 +277,14 @@ func (cmd *command) generateDeployment() *appsv1.Deployment { Name: pgAdminPassFileVolumeName, MountPath: pgAdminPassFileVolumePath, }, + { + Name: "tmp", + MountPath: "/tmp", + }, + { + Name: "home", + MountPath: "/home/pgadmin", + }, }, ReadinessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ @@ -303,6 +315,21 @@ func (cmd *command) generateDeployment() *appsv1.Deployment { }, }, }, + { + Name: "home", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: ptr.To(resource.MustParse("100Mi")), + }, + }, + }, }, }, }, diff --git a/internal/cmd/plugin/pgadmin/pgadmin_test.go b/internal/cmd/plugin/pgadmin/pgadmin_test.go index b1f4666b8c..0c9a4e19fd 100644 --- a/internal/cmd/plugin/pgadmin/pgadmin_test.go +++ b/internal/cmd/plugin/pgadmin/pgadmin_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgadmin diff --git a/internal/cmd/plugin/pgadmin/suite_test.go b/internal/cmd/plugin/pgadmin/suite_test.go index 9add9d7e6d..e09bd27ada 100644 --- a/internal/cmd/plugin/pgadmin/suite_test.go +++ b/internal/cmd/plugin/pgadmin/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
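Editor's note, not part of the patch: the two new mounts follow the usual recipe for containers that need writable scratch space, `/tmp` as a RAM-backed, size-capped `emptyDir` and the home directory as a plain `emptyDir`. A sketch of just the volume definitions, using the same Kubernetes API packages the patch imports:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"
)

func main() {
	volumes := []corev1.Volume{
		{
			// Plain emptyDir: pgAdmin gets a writable home directory
			// even with a read-only root filesystem.
			Name: "home",
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{},
			},
		},
		{
			// RAM-backed and capped: /tmp stays fast and cannot grow
			// past 100Mi even if something leaks temporary files.
			Name: "tmp",
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{
					Medium:    corev1.StorageMediumMemory,
					SizeLimit: ptr.To(resource.MustParse("100Mi")),
				},
			},
		},
	}
	for _, v := range volumes {
		fmt.Println("volume:", v.Name)
	}
}
```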
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgadmin diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go index 3c601d48ba..5ace0e9487 100644 --- a/internal/cmd/plugin/pgbench/cmd.go +++ b/internal/cmd/plugin/pgbench/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbench @@ -20,6 +23,8 @@ import ( "fmt" "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd initializes the pgBench command @@ -27,10 +32,11 @@ func NewCmd() *cobra.Command { run := &pgBenchRun{} pgBenchCmd := &cobra.Command{ - Use: "pgbench [cluster] [-- pgBenchCommandArgs...]", + Use: "pgbench CLUSTER [-- PGBENCH_COMMAND_ARGS...]", Short: "Creates a pgbench job", Args: validateCommandArgs, Long: "Creates a pgbench job to run against the specified Postgres Cluster.", + GroupID: plugin.GroupIDMiscellaneous, Example: jobExample, RunE: func(cmd *cobra.Command, args []string) error { run.clusterName = args[0] @@ -44,14 +50,14 @@ func NewCmd() *cobra.Command { &run.jobName, "job-name", "", - "Name of the job, defaulting to: -pgbench-xxxx", + "Name of the job, defaulting to: CLUSTER-pgbench-xxxx", ) pgBenchCmd.Flags().StringVar( &run.jobName, "pgbench-job-name", "", - "Name of the job, defaulting to: -pgbench-xxxx", + "Name of the job, defaulting to: CLUSTER-pgbench-xxxx", ) pgBenchCmd.Flags().StringVar( @@ -61,6 +67,13 @@ func NewCmd() *cobra.Command { "The name of the database that will be used by pgbench. Defaults to: app", ) + pgBenchCmd.Flags().Int32Var( + &run.ttlSecondsAfterFinished, + "ttl", + 0, + "Time to live of the pgbench job. Defaults to no TTL.", + ) + pgBenchCmd.Flags().BoolVar( &run.dryRun, "dry-run", @@ -85,7 +98,7 @@ func validateCommandArgs(cmd *cobra.Command, args []string) error { } if cmd.ArgsLenAtDash() > 1 { - return fmt.Errorf("pgBenchCommands should be passed after -- delimiter") + return fmt.Errorf("PGBENCH_COMMAND_ARGS should be passed after the -- delimiter") } return nil diff --git a/internal/cmd/plugin/pgbench/cmd_test.go b/internal/cmd/plugin/pgbench/cmd_test.go index 68ec5ebf6f..db58b5be4e 100644 --- a/internal/cmd/plugin/pgbench/cmd_test.go +++ b/internal/cmd/plugin/pgbench/cmd_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
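[Note on validateCommandArgs above: it relies on cobra's ArgsLenAtDash to keep everything except the cluster name behind the -- separator. A condensed sketch of the pattern:]

package pgbench

import (
	"fmt"

	"github.com/spf13/cobra"
)

// validate enforces "exactly one positional argument before --":
// ArgsLenAtDash reports how many args preceded the -- separator (-1 when
// none was given), so anything above 1 means extra arguments slipped in
// front of the delimiter.
func validate(cmd *cobra.Command, args []string) error {
	if err := cobra.MinimumNArgs(1)(cmd, args); err != nil {
		return err
	}
	if cmd.ArgsLenAtDash() > 1 {
		return fmt.Errorf("PGBENCH_COMMAND_ARGS should be passed after the -- delimiter")
	}
	return nil
}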
+ +SPDX-License-Identifier: Apache-2.0 */ package pgbench @@ -27,7 +30,7 @@ var _ = Describe("NewCmd", func() { It("should create a cobra.Command with correct defaults", func() { cmd := NewCmd() - Expect(cmd.Use).To(Equal("pgbench [cluster] [-- pgBenchCommandArgs...]")) + Expect(cmd.Use).To(Equal("pgbench CLUSTER [-- PGBENCH_COMMAND_ARGS...]")) Expect(cmd.Short).To(Equal("Creates a pgbench job")) Expect(cmd.Long).To(Equal("Creates a pgbench job to run against the specified Postgres Cluster.")) Expect(cmd.Example).To(Equal(jobExample)) @@ -62,7 +65,7 @@ var _ = Describe("NewCmd", func() { testRun.dbName, _ = cmd.Flags().GetString("db-name") testRun.dryRun, _ = cmd.Flags().GetBool("dry-run") testRun.nodeSelector, _ = cmd.Flags().GetStringSlice("node-selector") - + testRun.ttlSecondsAfterFinished, _ = cmd.Flags().GetInt32("ttl") testRun.clusterName = args[0] testRun.pgBenchCommandArgs = args[1:] return nil @@ -75,6 +78,7 @@ var _ = Describe("NewCmd", func() { "--db-name=mydb", "--dry-run=true", "--node-selector=label=value", + "--ttl=86400", "arg1", "arg2", } @@ -91,6 +95,7 @@ var _ = Describe("NewCmd", func() { Expect(testRun.dbName).To(Equal("mydb")) Expect(testRun.dryRun).To(BeTrue()) Expect(testRun.nodeSelector).To(Equal([]string{"label=value"})) + Expect(testRun.ttlSecondsAfterFinished).To(Equal(int32(86400))) Expect(testRun.pgBenchCommandArgs).To(Equal([]string{"arg1", "arg2"})) }) }) diff --git a/internal/cmd/plugin/pgbench/doc.go b/internal/cmd/plugin/pgbench/doc.go index 1f5dcc9c3c..a4269660f5 100644 --- a/internal/cmd/plugin/pgbench/doc.go +++ b/internal/cmd/plugin/pgbench/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package pgbench implements the pgbench job creation diff --git a/internal/cmd/plugin/pgbench/pgbench.go b/internal/cmd/plugin/pgbench/pgbench.go index a1b92eec4f..a9ec47f5af 100644 --- a/internal/cmd/plugin/pgbench/pgbench.go +++ b/internal/cmd/plugin/pgbench/pgbench.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package pgbench implements the kubectl-cnpg pgbench sub-command @@ -34,12 +37,13 @@ import ( ) type pgBenchRun struct { - jobName string - clusterName string - dbName string - nodeSelector []string - pgBenchCommandArgs []string - dryRun bool + jobName string + clusterName string + dbName string + nodeSelector []string + pgBenchCommandArgs []string + dryRun bool + ttlSecondsAfterFinished int32 } const ( @@ -47,18 +51,22 @@ const ( ) var jobExample = ` - # Dry-run command with default values and clusterName "cluster-example" + # Dry-run command with default values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example --dry-run - # Create a pgbench job with default values and clusterName "cluster-example" + # Create a pgbench job with default values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example - # Dry-run command with given values and clusterName "cluster-example" + # Dry-run command with given values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name --dry-run -- \ --time 30 --client 1 --jobs 1 - # Create a job with given values and clusterName "cluster-example" + # Create a job with given values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name -- \ + --time 30 --client 1 --jobs 1 + + # Create a job with given values on [cluster] "cluster-example". The job will be cleaned after 10 minutes. + kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name --ttl 600 -- \ --time 30 --client 1 --jobs 1` func (cmd *pgBenchRun) execute(ctx context.Context) error { @@ -120,12 +128,12 @@ func (cmd *pgBenchRun) buildNodeSelector() map[string]string { } func (cmd *pgBenchRun) buildJob(cluster *apiv1.Cluster) *batchv1.Job { - clusterImageName := cluster.Spec.ImageName labels := map[string]string{ "pgBenchJob": cluster.Name, } - return &batchv1.Job{ - // To ensure we have manifest with Kind and APi in --dry-run + + result := &batchv1.Job{ + // To ensure we have manifest with Kind and API in --dry-run TypeMeta: metav1.TypeMeta{ APIVersion: "batch/v1", Kind: "Job", @@ -146,7 +154,7 @@ func (cmd *pgBenchRun) buildJob(cluster *apiv1.Cluster) *batchv1.Job { Containers: []corev1.Container{ { Name: "pgbench", - Image: clusterImageName, + Image: cluster.Status.Image, ImagePullPolicy: corev1.PullAlways, Env: cmd.buildEnvVariables(), Command: []string{pgBenchKeyWord}, @@ -158,6 +166,12 @@ func (cmd *pgBenchRun) buildJob(cluster *apiv1.Cluster) *batchv1.Job { }, }, } + + if cmd.ttlSecondsAfterFinished != 0 { + result.Spec.TTLSecondsAfterFinished = &cmd.ttlSecondsAfterFinished + } + + return result } func (cmd *pgBenchRun) buildEnvVariables() []corev1.EnvVar { diff --git a/internal/cmd/plugin/pgbench/suite_test.go b/internal/cmd/plugin/pgbench/suite_test.go index 4a0960b6eb..3333764f1c 100644 --- a/internal/cmd/plugin/pgbench/suite_test.go +++ b/internal/cmd/plugin/pgbench/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
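[Note on the --ttl wiring above: ttlSecondsAfterFinished maps straight onto the Job API, and once set the TTL-after-finished controller deletes the finished Job together with its pods. The same logic in isolation; the helper name is ours:]

package pgbench

import (
	batchv1 "k8s.io/api/batch/v1"
)

// withTTL mirrors the buildJob logic above: zero keeps the Job around
// indefinitely, any other value lets Kubernetes garbage-collect the Job
// (and its pods) that many seconds after completion.
func withTTL(job *batchv1.Job, seconds int32) *batchv1.Job {
	if seconds != 0 {
		job.Spec.TTLSecondsAfterFinished = &seconds
	}
	return job
}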
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbench diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go index ba593533d7..67be9b9c3f 100644 --- a/internal/cmd/plugin/plugin.go +++ b/internal/cmd/plugin/plugin.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package plugin contains the common behaviors of the kubectl-cnpg subcommand @@ -37,12 +40,16 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) var ( // Namespace to operate in Namespace string + // KubeContext to operate with + KubeContext string + // NamespaceExplicitlyPassed indicates if the namespace was passed manually NamespaceExplicitlyPassed bool @@ -56,6 +63,24 @@ var ( ClientInterface kubernetes.Interface ) +const ( + // GroupIDAdmin represents an ID to group up CNPG commands + GroupIDAdmin = "admin" + + // GroupIDTroubleshooting represents an ID to group up troubleshooting + // commands + GroupIDTroubleshooting = "troubleshooting" + + // GroupIDCluster represents an ID to group up Postgres Cluster commands + GroupIDCluster = "cluster" + + // GroupIDDatabase represents an ID to group up Postgres Database commands + GroupIDDatabase = "db" + + // GroupIDMiscellaneous represents an ID to group up miscellaneous commands + GroupIDMiscellaneous = "misc" +) + // SetupKubernetesClient creates a k8s client to be used inside the kubectl-cnpg // utility func SetupKubernetesClient(configFlags *genericclioptions.ConfigFlags) error { @@ -78,18 +103,23 @@ return err } + KubeContext = *configFlags.Context + ClientInterface = kubernetes.NewForConfigOrDie(Config) - return nil + return utils.DetectSecurityContextConstraints(ClientInterface.Discovery()) } func createClient(cfg *rest.Config) error { var err error + scheme := runtime.NewScheme() _ = clientgoscheme.AddToScheme(scheme) _ = apiv1.AddToScheme(scheme) _ = storagesnapshotv1.AddToScheme(scheme) + cfg.UserAgent = fmt.Sprintf("kubectl-cnpg/v%s (%s)", versions.Version, versions.Info.Commit) + Client, err = client.New(cfg, client.Options{Scheme: scheme}) if err != nil { return err diff --git a/internal/cmd/plugin/plugin_test.go b/internal/cmd/plugin/plugin_test.go index b6cfebe70f..d0fa5b493c 100644 --- a/internal/cmd/plugin/plugin_test.go +++ b/internal/cmd/plugin/plugin_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
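[Note on the GroupID constants above: they only take effect if the root command registers matching cobra.Groups, and to the best of our knowledge cobra panics at execution time when a subcommand references an unregistered group ID. A sketch of the expected wiring; the group titles are illustrative, not from the patch:]

package cli

import (
	"github.com/spf13/cobra"

	"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
)

// newRoot registers every group before any subcommand refers to it.
func newRoot() *cobra.Command {
	root := &cobra.Command{Use: "kubectl-cnpg"}
	root.AddGroup(
		&cobra.Group{ID: plugin.GroupIDAdmin, Title: "Operator administration:"},
		&cobra.Group{ID: plugin.GroupIDCluster, Title: "Cluster administration:"},
		&cobra.Group{ID: plugin.GroupIDDatabase, Title: "Database administration:"},
		&cobra.Group{ID: plugin.GroupIDTroubleshooting, Title: "Troubleshooting:"},
		&cobra.Group{ID: plugin.GroupIDMiscellaneous, Title: "Miscellaneous:"},
	)
	return root
}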
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin @@ -23,6 +26,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -30,8 +34,14 @@ import ( var _ = Describe("create client", func() { It("with given configuration", func() { + // createClient is not a pure function and as a side effect + // it will: + // - set the Client global variable + // - set the UserAgent field inside cfg err := createClient(cfg) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg.UserAgent).To(Equal("kubectl-cnpg/v" + versions.Version + " (" + versions.Info.Commit + ")")) Expect(Client).NotTo(BeNil()) }) }) diff --git a/internal/cmd/plugin/printer.go b/internal/cmd/plugin/printer.go index 47312bccd4..0b69c9d314 100644 --- a/internal/cmd/plugin/printer.go +++ b/internal/cmd/plugin/printer.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/plugin/promote/cmd.go b/internal/cmd/plugin/promote/cmd.go index b2bd7e134d..e9217ae7f7 100644 --- a/internal/cmd/plugin/promote/cmd.go +++ b/internal/cmd/plugin/promote/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package promote @@ -29,9 +32,10 @@ import ( // NewCmd creates the new "promote" subcommand func NewCmd() *cobra.Command { promoteCmd := &cobra.Command{ - Use: "promote [cluster] [node]", - Short: "Promote the pod named [cluster]-[node] or [node] to primary", - Args: plugin.RequiresArguments(2), + Use: "promote CLUSTER INSTANCE", + Short: "Promote the instance named CLUSTER-INSTANCE to primary", + GroupID: plugin.GroupIDCluster, + Args: plugin.RequiresArguments(2), RunE: func(_ *cobra.Command, args []string) error { ctx := context.Background() clusterName := args[0] @@ -39,7 +43,7 @@ func NewCmd() *cobra.Command { if _, err := strconv.Atoi(args[1]); err == nil { node = fmt.Sprintf("%s-%s", clusterName, node) } - return Promote(ctx, clusterName, node) + return Promote(ctx, plugin.Client, plugin.Namespace, clusterName, node) }, } diff --git a/internal/cmd/plugin/promote/promote.go b/internal/cmd/plugin/promote/promote.go index 7b4ea964fa..4f181d71e4 100644 --- a/internal/cmd/plugin/promote/promote.go +++ b/internal/cmd/plugin/promote/promote.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package promote implements the kubectl-cnpg promote command @@ -21,23 +24,24 @@ import ( "context" "fmt" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" v1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) -// Promote command implementation -func Promote(ctx context.Context, clusterName string, serverName string) error { +// Promote promotes an instance in a cluster +func Promote(ctx context.Context, cli client.Client, + namespace, clusterName, serverName string, +) error { var cluster apiv1.Cluster // Get the Cluster object - err := plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, &cluster) + err := cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, &cluster) if err != nil { - return fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace) + return fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, namespace, err) } // If server name is equal to target primary, there is no need to promote @@ -49,22 +53,21 @@ func Promote(ctx context.Context, clusterName string, serverName string) error { // Check if the Pod exists var pod v1.Pod - err = plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: serverName}, &pod) + err = cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: serverName}, &pod) if err != nil { - return fmt.Errorf("new primary node %s not found in namespace %s", serverName, plugin.Namespace) + return fmt.Errorf("new primary node %s not found in namespace %s: %w", serverName, namespace, err) } - // The Pod exists, let's update
status fields - origCluster := cluster.DeepCopy() - cluster.Status.TargetPrimary = serverName - cluster.Status.TargetPrimaryTimestamp = utils.GetCurrentTimestamp() - if err := status.RegisterPhaseWithOrigCluster( - ctx, - plugin.Client, - &cluster, - origCluster, - apiv1.PhaseSwitchover, - fmt.Sprintf("Switching over to %v", serverName), + // The Pod exists, let's update the cluster's status with the new target primary + reconcileTargetPrimaryFunc := func(cluster *apiv1.Cluster) { + cluster.Status.TargetPrimary = serverName + cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp() + cluster.Status.Phase = apiv1.PhaseSwitchover + cluster.Status.PhaseReason = fmt.Sprintf("Switching over to %v", serverName) + } + if err := status.PatchWithOptimisticLock(ctx, cli, &cluster, + reconcileTargetPrimaryFunc, + status.SetClusterReadyCondition, ); err != nil { return err } diff --git a/internal/cmd/plugin/promote/promote_test.go b/internal/cmd/plugin/promote/promote_test.go new file mode 100644 index 0000000000..cb2ba8c950 --- /dev/null +++ b/internal/cmd/plugin/promote/promote_test.go @@ -0,0 +1,95 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package promote + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + k8client "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("promote subcommand tests", func() { + var client k8client.Client + const namespace = "theNamespace" + BeforeEach(func() { + cluster1 := apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: namespace, + }, + Spec: apiv1.ClusterSpec{}, + Status: apiv1.ClusterStatus{ + CurrentPrimary: "cluster1-1", + TargetPrimary: "cluster1-1", + Phase: apiv1.PhaseHealthy, + Conditions: []metav1.Condition{ + { + Type: string(apiv1.ConditionClusterReady), + Status: metav1.ConditionTrue, + Reason: string(apiv1.ClusterReady), + Message: "Cluster is Ready", + }, + }, + }, + } + newPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1-2", + Namespace: namespace, + }, + } + client = fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()). + WithObjects(&cluster1, &newPod).WithStatusSubresource(&cluster1).Build() + }) + + It("correctly sets the target primary and the phase if the target pod is present", func(ctx SpecContext) { + Expect(Promote(ctx, client, namespace, "cluster1", "cluster1-2")). + To(Succeed()) + var cl apiv1.Cluster + Expect(client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: "cluster1"}, &cl)). 
+ To(Succeed()) + Expect(cl.Status.TargetPrimary).To(Equal("cluster1-2")) + Expect(cl.Status.Phase).To(Equal(apiv1.PhaseSwitchover)) + Expect(cl.Status.PhaseReason).To(Equal("Switching over to cluster1-2")) + Expect(meta.IsStatusConditionTrue(cl.Status.Conditions, string(apiv1.ConditionClusterReady))). + To(BeFalse()) + }) + + It("ignores the promotion if the target pod is missing", func(ctx SpecContext) { + err := Promote(ctx, client, namespace, "cluster1", "cluster1-missingPod") + Expect(err).To(HaveOccurred()) + var cl apiv1.Cluster + Expect(client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: "cluster1"}, &cl)). + To(Succeed()) + Expect(cl.Status.TargetPrimary).To(Equal("cluster1-1")) + Expect(cl.Status.Phase).To(Equal(apiv1.PhaseHealthy)) + Expect(meta.IsStatusConditionTrue(cl.Status.Conditions, string(apiv1.ConditionClusterReady))). + To(BeTrue()) + }) +}) diff --git a/internal/cmd/plugin/promote/suite_test.go b/internal/cmd/plugin/promote/suite_test.go new file mode 100644 index 0000000000..566770d356 --- /dev/null +++ b/internal/cmd/plugin/promote/suite_test.go @@ -0,0 +1,33 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package promote + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestPlugin(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Promote plugin Suite") +} diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go index 3910021464..b471109e80 100644 --- a/internal/cmd/plugin/psql/cmd.go +++ b/internal/cmd/plugin/psql/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
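[Note on the promote hunks above: status.PatchWithOptimisticLock replaces the older RegisterPhaseWithOrigCluster flow; its implementation lives elsewhere in the tree. A minimal sketch of the pattern it presumably follows, under the assumption that it applies the given mutations and then patches .status carrying the original resourceVersion:]

package promote

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// patchStatusOptimistic applies every mutation to the live object and
// patches .status with the original resourceVersion, so a concurrent
// writer produces a conflict instead of being silently overwritten.
func patchStatusOptimistic(
	ctx context.Context,
	cli client.Client,
	cluster *apiv1.Cluster,
	mutations ...func(*apiv1.Cluster),
) error {
	orig := cluster.DeepCopy()
	for _, mutate := range mutations {
		mutate(cluster)
	}
	return cli.Status().Patch(ctx, cluster,
		client.MergeFromWithOptions(orig, client.MergeFromWithOptimisticLock{}))
}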
+ +SPDX-License-Identifier: Apache-2.0 */ package psql @@ -31,19 +34,21 @@ func NewCmd() *cobra.Command { var passStdin bool cmd := &cobra.Command{ - Use: "psql [cluster] [-- psqlArgs...]", + Use: "psql CLUSTER [-- PSQL_ARGS...]", Short: "Start a psql session targeting a CloudNativePG cluster", Args: validatePsqlArgs, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, - Long: "This command will start an interactive psql session inside a PostgreSQL Pod created by CloudNativePG.", + Long: "This command will start an interactive psql session inside a PostgreSQL Pod created by CloudNativePG.", + GroupID: plugin.GroupIDMiscellaneous, RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] psqlArgs := args[1:] psqlOptions := CommandOptions{ Replica: replica, Namespace: plugin.Namespace, + Context: plugin.KubeContext, AllocateTTY: allocateTTY, PassStdin: passStdin, Args: psqlArgs, @@ -91,7 +96,7 @@ func validatePsqlArgs(cmd *cobra.Command, args []string) error { } if cmd.ArgsLenAtDash() > 1 { - return fmt.Errorf("psqlArgs should be passed after -- delimitator") + return fmt.Errorf("psqlArgs should be passed after the -- delimiter") } return nil diff --git a/internal/cmd/plugin/psql/doc.go b/internal/cmd/plugin/psql/doc.go index 4b723495eb..039cf73ef0 100644 --- a/internal/cmd/plugin/psql/doc.go +++ b/internal/cmd/plugin/psql/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package psql implements the `kubectl cnpg psql` command diff --git a/internal/cmd/plugin/psql/psql.go b/internal/cmd/plugin/psql/psql.go index 1207f50188..362f03068e 100644 --- a/internal/cmd/plugin/psql/psql.go +++ b/internal/cmd/plugin/psql/psql.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
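[Note on the psql command: it builds a kubectl argv rather than talking to the API server itself, and the exec.LookPath call in the hunks below suggests the process is then handed over to kubectl. A hypothetical sketch of that hand-off, not necessarily the plugin's exact code:]

package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

// execKubectl replaces the current process with kubectl (Unix-only), so
// the interactive psql session owns the terminal exactly as if kubectl
// had been invoked directly.
func execKubectl(argv []string) error {
	kubectlPath, err := exec.LookPath("kubectl")
	if err != nil {
		return fmt.Errorf("while getting kubectl path: %w", err)
	}
	return syscall.Exec(kubectlPath, argv, os.Environ())
}

func main() {
	_ = execKubectl([]string{"kubectl", "exec", "-ti", "cluster-example-1", "--", "psql", "-U", "postgres"})
}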
+ +SPDX-License-Identifier: Apache-2.0 */ package psql @@ -24,6 +27,7 @@ import ( "syscall" corev1 "k8s.io/api/core/v1" + "k8s.io/utils/strings/slices" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" @@ -58,6 +62,9 @@ type CommandOptions struct { // The Namespace where we're working in Namespace string + // The Context to execute the command + Context string + // Whether we should allocate a TTY for psql AllocateTTY bool @@ -83,6 +90,11 @@ func NewCommand( return nil, err } + // Check if the pod list is empty + if len(pods.Items) == 0 { + return nil, fmt.Errorf("cluster does not exist or is not accessible") + } + kubectlPath, err := exec.LookPath(kubectlCommand) if err != nil { return nil, fmt.Errorf("while getting kubectl path: %w", err) } @@ -97,9 +109,13 @@ // getKubectlInvocation gets the kubectl command to be executed func (psql *Command) getKubectlInvocation() ([]string, error) { - result := make([]string, 0, 11+len(psql.Args)) + result := make([]string, 0, 13+len(psql.Args)) result = append(result, "kubectl", "exec") + if psql.Context != "" { + result = append(result, "--context", psql.Context) + } + if psql.AllocateTTY { result = append(result, "-t") } @@ -116,6 +132,11 @@ return nil, err } + // Default to `postgres` if no user has been specified + if !slices.Contains(psql.Args, "-U") { + psql.Args = append([]string{"-U", "postgres"}, psql.Args...) + } + result = append(result, podName) result = append(result, "--", "psql") result = append(result, psql.Args...) diff --git a/internal/cmd/plugin/psql/psql_test.go b/internal/cmd/plugin/psql/psql_test.go index a01dd7cbb8..26be3ff2ba 100644 --- a/internal/cmd/plugin/psql/psql_test.go +++ b/internal/cmd/plugin/psql/psql_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package psql @@ -69,8 +72,9 @@ var _ = Describe("psql launcher", func() { } _, err := cmd.getPodName() - Expect(err).To(HaveOccurred()) - Expect(err.(*ErrMissingPod)).ToNot(BeNil()) + Expect(err).To(MatchError((&ErrMissingPod{ + role: "primary", + }).Error())) }) It("correctly composes a kubectl exec command line", func() { @@ -95,6 +99,8 @@ "cluster-example-1", "--", "psql", + "-U", + "postgres", )) }) @@ -120,6 +126,8 @@ "cluster-example-1", "--", "psql", + "-U", + "postgres", "-c", "select 1", )) diff --git a/internal/cmd/plugin/psql/suite_test.go b/internal/cmd/plugin/psql/suite_test.go index 9ffe5eb224..6b6d008a7d 100644 --- a/internal/cmd/plugin/psql/suite_test.go +++ b/internal/cmd/plugin/psql/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
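[Note on the default-user logic above: it is a literal membership test, so combined spellings such as -Upostgres or --username are not detected and would be passed through after the injected "-U postgres". Condensed, the behavior is:]

package main

import (
	"fmt"

	"k8s.io/utils/strings/slices"
)

func main() {
	args := []string{"-c", "select 1"}

	// Prepend "-U postgres" unless the caller already chose a user.
	if !slices.Contains(args, "-U") {
		args = append([]string{"-U", "postgres"}, args...)
	}

	fmt.Println(args) // [-U postgres -c select 1]
}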
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package psql diff --git a/internal/cmd/plugin/reload/cmd.go b/internal/cmd/plugin/reload/cmd.go index 1f9a6bb1ab..968c8a65ca 100644 --- a/internal/cmd/plugin/reload/cmd.go +++ b/internal/cmd/plugin/reload/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package reload @@ -27,10 +30,11 @@ import ( // NewCmd creates the new "reload" command func NewCmd() *cobra.Command { restartCmd := &cobra.Command{ - Use: "reload [clusterName]", - Short: `Reload the cluster`, - Long: `Triggers a reconciliation loop for all the cluster's instances, rolling out new configurations if present.`, - Args: plugin.RequiresArguments(1), + Use: "reload CLUSTER", + Short: `Reload a cluster`, + Long: `Triggers a reconciliation loop for all the cluster's instances, rolling out new configurations if present.`, + GroupID: plugin.GroupIDCluster, + Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, diff --git a/internal/cmd/plugin/reload/reload.go b/internal/cmd/plugin/reload/reload.go index 17967bbf55..b97c72666a 100644 --- a/internal/cmd/plugin/reload/reload.go +++ b/internal/cmd/plugin/reload/reload.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
+ +SPDX-License-Identifier: Apache-2.0 */ // Package reload implements a command to trigger a reconciliation loop for a cluster @@ -21,6 +24,7 @@ import ( "context" "fmt" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -42,7 +46,7 @@ func Reload(ctx context.Context, clusterName string) error { if clusterRestarted.Annotations == nil { clusterRestarted.Annotations = make(map[string]string) } - clusterRestarted.Annotations[utils.ClusterReloadAnnotationName] = utils.GetCurrentTimestamp() + clusterRestarted.Annotations[utils.ClusterReloadAnnotationName] = pgTime.GetCurrentTimestamp() clusterRestarted.ManagedFields = nil err = plugin.Client.Patch(ctx, clusterRestarted, client.MergeFrom(&cluster)) diff --git a/internal/cmd/plugin/report/cluster.go b/internal/cmd/plugin/report/cluster.go index 5e5920db37..b851e55358 100644 --- a/internal/cmd/plugin/report/cluster.go +++ b/internal/cmd/plugin/report/cluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report @@ -32,7 +35,7 @@ func clusterCmd() *cobra.Command { const filePlaceholder = "report_cluster__.zip" cmd := &cobra.Command{ - Use: "cluster ", + Use: "cluster CLUSTER", Short: "Report cluster resources, pods, events, logs (opt-in)", Long: "Collects combined information on the cluster in a Zip file", Args: plugin.RequiresArguments(1), diff --git a/internal/cmd/plugin/report/cluster_report.go b/internal/cmd/plugin/report/cluster_report.go index 68a44d5594..d2eb5aecc5 100644 --- a/internal/cmd/plugin/report/cluster_report.go +++ b/internal/cmd/plugin/report/cluster_report.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/cmd.go b/internal/cmd/plugin/report/cmd.go index c56c7d6417..f018f57888 100644 --- a/internal/cmd/plugin/report/cmd.go +++ b/internal/cmd/plugin/report/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,19 +13,24 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
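[Context for the reload hunk above: the command stamps a timestamp annotation on a deep copy of the Cluster and sends a JSON merge patch computed against the unmodified object, so only the annotation travels to the API server. The pattern in isolation, using the same identifiers the hunk touches:]

package reload

import (
	"context"

	pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)

// annotateForReload stamps the reload annotation on a copy and sends a
// merge patch diffed against the original, so the request body contains
// only the annotation change.
func annotateForReload(ctx context.Context, cli client.Client, cluster *apiv1.Cluster) error {
	patched := cluster.DeepCopy()
	if patched.Annotations == nil {
		patched.Annotations = make(map[string]string)
	}
	patched.Annotations[utils.ClusterReloadAnnotationName] = pgTime.GetCurrentTimestamp()
	return cli.Patch(ctx, patched, client.MergeFrom(cluster))
}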
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report import ( "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd creates the new "report" command func NewCmd() *cobra.Command { reportCmd := &cobra.Command{ - Use: "report operator/cluster", - Short: "Report on the operator", + Use: "report operator/cluster", + Short: "Report on the operator or a cluster for troubleshooting", + GroupID: plugin.GroupIDTroubleshooting, } reportCmd.AddCommand(operatorCmd()) diff --git a/internal/cmd/plugin/report/logs.go b/internal/cmd/plugin/report/logs.go index 220c6bf3b9..985b5be4d7 100644 --- a/internal/cmd/plugin/report/logs.go +++ b/internal/cmd/plugin/report/logs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report @@ -24,11 +27,13 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/pkg/podlogs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" ) const jobMatcherLabel = "job-name" @@ -46,9 +51,7 @@ func streamOperatorLogsToZip( if _, err := zipper.Create(logsDir + "/"); err != nil { return fmt.Errorf("could not add '%s' to zip: %w", logsDir, err) } - podLogOptions := &corev1.PodLogOptions{ - Timestamps: logTimeStamp, // NOTE: when activated, lines are no longer JSON - } + for i := range pods { pod := pods[i] path := filepath.Join(logsDir, fmt.Sprintf("%s-logs.jsonl", pod.Name)) @@ -57,21 +60,12 @@ func streamOperatorLogsToZip( return fmt.Errorf("could not add '%s' to zip: %w", path, zipperErr) } - streamPodLogs := &logs.StreamingRequest{ - Pod: &pod, - Options: podLogOptions, - Previous: true, + streamPodLogs := podlogs.NewPodLogsWriter(pod, kubernetes.NewForConfigOrDie(ctrl.GetConfigOrDie())) + opts := &corev1.PodLogOptions{ + Timestamps: logTimeStamp, + Previous: true, } - if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil { - return err - } - _ = streamPodLogs.Stream(ctx, writer) - if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil { - return err - } - - streamPodLogs.Previous = false - if err := streamPodLogs.Stream(ctx, writer); err != nil { + if err := streamPodLogs.Single(ctx, writer, opts); err != nil { return err } } @@ -99,41 +93,25 @@ func streamClusterLogsToZip( utils.ClusterLabelName: clusterName, } - podLogOptions := &corev1.PodLogOptions{ - Timestamps: logTimeStamp, // NOTE: when activated, lines are no longer JSON - } - var podList corev1.PodList err = plugin.Client.List(ctx, &podList, matchClusterName, client.InNamespace(namespace)) if err != nil { return fmt.Errorf("could not get cluster pods: %w", err) } - 
streamPodLogs := &logs.StreamingRequest{ - Options: podLogOptions, - Previous: true, - } - for _, pod := range podList.Items { - writer, err := zipper.Create(filepath.Join(logsdir, pod.Name) + ".jsonl") - if err != nil { - return fmt.Errorf("could not add '%s' to zip: %w", - filepath.Join(logsdir, pod.Name), err) - } - podPointer := pod - streamPodLogs.Pod = &podPointer + cli := kubernetes.NewForConfigOrDie(ctrl.GetConfigOrDie()) - if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil { - return err + for idx := range podList.Items { + pod := podList.Items[idx] + streamPodLogs := podlogs.NewPodLogsWriter(pod, cli) + fileNamer := func(containerName string) string { + return filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, containerName)) } - // We ignore the error because it will error if there are no previous logs - _ = streamPodLogs.Stream(ctx, writer) - if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil { - return err + opts := &corev1.PodLogOptions{ + Timestamps: logTimeStamp, + Previous: true, } - - streamPodLogs.Previous = false - - if err := streamPodLogs.Stream(ctx, writer); err != nil { + if err := streamPodLogs.Multiple(ctx, opts, zipper, fileNamer); err != nil { return err } } @@ -156,13 +134,8 @@ func streamClusterJobLogsToZip(ctx context.Context, clusterName, namespace strin utils.ClusterLabelName: clusterName, } - podLogOptions := &corev1.PodLogOptions{ - Timestamps: logTimeStamp, // NOTE: when activated, lines are no longer JSON - } - var jobList batchv1.JobList - err = plugin.Client.List(ctx, &jobList, matchClusterName, client.InNamespace(namespace)) - if err != nil { + if err := plugin.Client.List(ctx, &jobList, matchClusterName, client.InNamespace(namespace)); err != nil { return fmt.Errorf("could not get cluster jobs: %w", err) } @@ -171,25 +144,21 @@ func streamClusterJobLogsToZip(ctx context.Context, clusterName, namespace strin jobMatcherLabel: job.Name, } var podList corev1.PodList - err = plugin.Client.List(ctx, &podList, matchJobName, client.InNamespace(namespace)) - if err != nil { + if err := plugin.Client.List(ctx, &podList, matchJobName, client.InNamespace(namespace)); err != nil { return fmt.Errorf("could not get pods for job '%s': %w", job.Name, err) } - streamPodLogs := &logs.StreamingRequest{ - Options: podLogOptions, - Previous: false, - } - for _, pod := range podList.Items { - writer, err := zipper.Create(filepath.Join(logsdir, pod.Name) + ".jsonl") - if err != nil { - return fmt.Errorf("could not add '%s' to zip: %w", - filepath.Join(logsdir, pod.Name), err) + for idx := range podList.Items { + pod := podList.Items[idx] + streamPodLogs := podlogs.NewPodLogsWriter(pod, kubernetes.NewForConfigOrDie(ctrl.GetConfigOrDie())) + + fileNamer := func(containerName string) string { + return filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, containerName)) + } + opts := corev1.PodLogOptions{ + Timestamps: logTimeStamp, } - podPointer := pod - streamPodLogs.Pod = &podPointer - err = streamPodLogs.Stream(ctx, writer) - if err != nil { + if err := streamPodLogs.Multiple(ctx, &opts, zipper, fileNamer); err != nil { return err } } diff --git a/internal/cmd/plugin/report/olm.go b/internal/cmd/plugin/report/olm.go index cc20ff54ae..9a5c8b0548 100644 --- a/internal/cmd/plugin/report/olm.go +++ b/internal/cmd/plugin/report/olm.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF 
Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report @@ -46,7 +49,7 @@ func getOlmResourceList( resourceList, err := dynamicClient.Resource(gvr).Namespace(namespace). List(ctx, metav1.ListOptions{LabelSelector: getLabelOperatorsNamespace()}) if err != nil { - return nil, fmt.Errorf("could note get resource: %v, %v", gvr, err) + return nil, fmt.Errorf("could not list resource: %v, %v", gvr, err) } return resourceList, nil diff --git a/internal/cmd/plugin/report/operator.go b/internal/cmd/plugin/report/operator.go index 36dac12843..d30ca842eb 100644 --- a/internal/cmd/plugin/report/operator.go +++ b/internal/cmd/plugin/report/operator.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/operator_objects.go b/internal/cmd/plugin/report/operator_objects.go index f9cb4b5ad0..e585a13bd1 100644 --- a/internal/cmd/plugin/report/operator_objects.go +++ b/internal/cmd/plugin/report/operator_objects.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/operator_report.go b/internal/cmd/plugin/report/operator_report.go index f52785854a..58af2608cb 100644 --- a/internal/cmd/plugin/report/operator_report.go +++ b/internal/cmd/plugin/report/operator_report.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
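[Note on the report/logs.go rewrite above: log collection now goes through the podlogs package, one JSONL file per container, instead of the hand-rolled previous/current StreamingRequest. Under the hood, fetching a single container's logs with client-go looks roughly like this; a sketch of what such a writer presumably wraps, not the podlogs API itself:]

package report

import (
	"context"
	"io"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// copyContainerLogs requests one container's log stream (optionally the
// previous instance's logs) and copies it to the output writer.
func copyContainerLogs(
	ctx context.Context,
	cli kubernetes.Interface,
	pod corev1.Pod,
	container string,
	previous bool,
	w io.Writer,
) error {
	stream, err := cli.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
		Container: container,
		Previous:  previous,
	}).Stream(ctx)
	if err != nil {
		return err
	}
	defer stream.Close()

	_, err = io.Copy(w, stream)
	return err
}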
+ +SPDX-License-Identifier: Apache-2.0 */ // Package report implements the kubectl-cnpg report command diff --git a/internal/cmd/plugin/report/operator_utils.go b/internal/cmd/plugin/report/operator_utils.go index e51aea6491..c923e74872 100644 --- a/internal/cmd/plugin/report/operator_utils.go +++ b/internal/cmd/plugin/report/operator_utils.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report @@ -44,7 +47,7 @@ func getWebhooks(ctx context.Context, stopRedact bool) ( for _, item := range mutatingWebhookConfigList.Items { for _, webhook := range item.Webhooks { - if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.GroupVersion.Group { + if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.SchemeGroupVersion.Group { mWebhookConfig.Items = append(mWebhookConfig.Items, item) } } @@ -63,7 +66,7 @@ for _, item := range validatingWebhookConfigList.Items { for _, webhook := range item.Webhooks { - if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.GroupVersion.Group { + if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.SchemeGroupVersion.Group { vWebhookConfig.Items = append(vWebhookConfig.Items, item) } } @@ -79,7 +82,7 @@ if len(mWebhookConfig.Items) == 0 || len(vWebhookConfig.Items) == 0 { return nil, nil, fmt.Errorf( "can't find the webhooks targeting resources within the group %s", - apiv1.GroupVersion.Group, + apiv1.SchemeGroupVersion.Group, ) } diff --git a/internal/cmd/plugin/report/output.go b/internal/cmd/plugin/report/output.go index 3b24c5ee46..6deabf39ea 100644 --- a/internal/cmd/plugin/report/output.go +++ b/internal/cmd/plugin/report/output.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/redactors.go b/internal/cmd/plugin/report/redactors.go index bf78c1bada..7230081002 100644 --- a/internal/cmd/plugin/report/redactors.go +++ b/internal/cmd/plugin/report/redactors.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/redactors_test.go b/internal/cmd/plugin/report/redactors_test.go index 9c6a153fc7..010babb7c4 100644 --- a/internal/cmd/plugin/report/redactors_test.go +++ b/internal/cmd/plugin/report/redactors_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/suite_test.go b/internal/cmd/plugin/report/suite_test.go index c1b0b03b6d..f7a582ba54 100644 --- a/internal/cmd/plugin/report/suite_test.go +++ b/internal/cmd/plugin/report/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/restart/cmd.go b/internal/cmd/plugin/restart/cmd.go index 2b686f062c..afbe7022aa 100644 --- a/internal/cmd/plugin/restart/cmd.go +++ b/internal/cmd/plugin/restart/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package restart @@ -21,18 +24,21 @@ import ( "strconv" "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd creates the new "restart" command func NewCmd() *cobra.Command { restartCmd := &cobra.Command{ - Use: "restart clusterName [instance]", + Use: "restart CLUSTER [INSTANCE]", Short: `Restart a cluster or a single instance in a cluster`, Long: `If only the cluster name is specified, the whole cluster will be restarted, rolling out new configurations if present.
If a specific instance is specified, only that instance will be restarted, in-place if it is a primary, deleting the pod if it is a replica.`, - Args: cobra.RangeArgs(1, 2), + Args: cobra.RangeArgs(1, 2), + GroupID: plugin.GroupIDCluster, RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() clusterName := args[0] diff --git a/internal/cmd/plugin/restart/restart.go b/internal/cmd/plugin/restart/restart.go index ba2484546a..639349417a 100644 --- a/internal/cmd/plugin/restart/restart.go +++ b/internal/cmd/plugin/restart/restart.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package restart implements a command to rollout restart a cluster or restart a single instance @@ -66,17 +69,14 @@ func instanceRestart(ctx context.Context, clusterName, node string) error { if err != nil { return err } - originalCluster := cluster.DeepCopy() if cluster.Status.CurrentPrimary == node { - cluster.ManagedFields = nil - if err := status.RegisterPhaseWithOrigCluster( + if err := status.PatchWithOptimisticLock( ctx, plugin.Client, &cluster, - originalCluster, - apiv1.PhaseInplacePrimaryRestart, - "Requested by the user", + status.SetPhase(apiv1.PhaseInplacePrimaryRestart, "Requested by the user"), + status.SetClusterReadyCondition, ); err != nil { return fmt.Errorf("while requesting restart on primary POD for cluster %v: %w", clusterName, err) } diff --git a/internal/cmd/plugin/snapshot/cmd.go b/internal/cmd/plugin/snapshot/cmd.go index 8abd4a0752..b5f3b1a35a 100644 --- a/internal/cmd/plugin/snapshot/cmd.go +++ b/internal/cmd/plugin/snapshot/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
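[Note on the restart hunk above: status.SetPhase reads like a mutation factory consumed by PatchWithOptimisticLock, alongside SetClusterReadyCondition. Its assumed shape, inferred only from these call sites:]

package status

import (
	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// SetPhase returns a callback that records the phase transition;
// PatchWithOptimisticLock is assumed to run each callback against the
// cluster before issuing the status patch.
func SetPhase(phase string, reason string) func(*apiv1.Cluster) {
	return func(cluster *apiv1.Cluster) {
		cluster.Status.Phase = phase
		cluster.Status.PhaseReason = reason
	}
}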
+ +SPDX-License-Identifier: Apache-2.0 */ package snapshot @@ -28,9 +31,10 @@ import ( // NewCmd implements the `snapshot` subcommand func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "snapshot ", - Short: "command removed", - Long: "Replaced by `kubectl cnpg backup -m volumeSnapshot`", + Use: "snapshot CLUSTER", + Short: "DEPRECATED (use `backup -m volumeSnapshot` instead)", + Long: "Replaced by `kubectl cnpg backup -m volumeSnapshot`", + GroupID: plugin.GroupIDDatabase, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, diff --git a/internal/cmd/plugin/snapshot/doc.go b/internal/cmd/plugin/snapshot/doc.go index cf22d8168f..8ad0849052 100644 --- a/internal/cmd/plugin/snapshot/doc.go +++ b/internal/cmd/plugin/snapshot/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package snapshot implements the snapshot feature diff --git a/internal/cmd/plugin/status/cmd.go b/internal/cmd/plugin/status/cmd.go index 4ca0f70a63..b36560f758 100644 --- a/internal/cmd/plugin/status/cmd.go +++ b/internal/cmd/plugin/status/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package status @@ -28,9 +31,10 @@ import ( // NewCmd creates the new "status" subcommand func NewCmd() *cobra.Command { statusCmd := &cobra.Command{ - Use: "status [cluster]", - Short: "Get the status of a PostgreSQL cluster", - Args: plugin.RequiresArguments(1), + Use: "status CLUSTER", + Short: "Get the status of a PostgreSQL cluster", + Args: plugin.RequiresArguments(1), + GroupID: plugin.GroupIDDatabase, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if strings.HasPrefix(toComplete, "-") { fmt.Printf("%+v\n", toComplete) @@ -41,15 +45,15 @@ func NewCmd() *cobra.Command { ctx := cmd.Context() clusterName := args[0] - verbose, _ := cmd.Flags().GetBool("verbose") + verbose, _ := cmd.Flags().GetCount("verbose") output, _ := cmd.Flags().GetString("output") return Status(ctx, clusterName, verbose, plugin.OutputFormat(output)) }, } - statusCmd.Flags().BoolP( - "verbose", "v", false, "Include PostgreSQL configuration, HBA rules, and full replication slots info") + statusCmd.Flags().CountP( + "verbose", "v", "Increase verbosity to display more information") statusCmd.Flags().StringP( "output", "o", "text", "Output format.
One of text|json") diff --git a/internal/cmd/plugin/status/doc.go b/internal/cmd/plugin/status/doc.go index 1bc9104790..decdf55fe1 100644 --- a/internal/cmd/plugin/status/doc.go +++ b/internal/cmd/plugin/status/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package status implements the kubectl-cnpg status command diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 15285733fc..43a097c0e0 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package status @@ -27,7 +30,9 @@ import ( "time" "github.com/cheynewallace/tabby" - types "github.com/cloudnative-pg/machinery/pkg/types" + "github.com/cloudnative-pg/cnpg-i/pkg/identity" + "github.com/cloudnative-pg/machinery/pkg/stringset" + "github.com/cloudnative-pg/machinery/pkg/types" "github.com/logrusorgru/aurora/v4" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -44,7 +49,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -66,6 +70,9 @@ type PostgresqlStatus struct { // ErrorList stores the possible errors while getting the PostgreSQL status ErrorList []error + + // The size of the cluster + TotalClusterSize string } func (fullStatus *PostgresqlStatus) getReplicationSlotList() postgres.PgReplicationSlotList { @@ -96,38 +103,55 @@ func getPrintableIntegerPointer(i *int) string { } // Status implements the "status" subcommand -func Status(ctx context.Context, clusterName string, verbose bool, format plugin.OutputFormat) error { +func Status( + ctx context.Context, + clusterName string, + verbosity int, + format plugin.OutputFormat, +) error { var cluster apiv1.Cluster var errs []error + + // Create a Kubernetes client suitable for calling the "Exec" subresource + clientInterface := kubernetes.NewForConfigOrDie(plugin.Config) + // Get the Cluster object err := plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, &cluster) if err != nil { - return err + return fmt.Errorf("while trying to get cluster %s in namespace %s: %w", clusterName, plugin.Namespace, err) } status := extractPostgresqlStatus(ctx, cluster) + hibernated, _ := 
isHibernated(status) + err = plugin.Print(status, format, os.Stdout) if err != nil || format != plugin.OutputFormatText { return err } errs = append(errs, status.ErrorList...) - status.printBasicInfo() + status.printBasicInfo(ctx, clientInterface) status.printHibernationInfo() status.printDemotionTokenInfo() status.printPromotionTokenInfo() - if verbose { - errs = append(errs, status.printPostgresConfiguration(ctx)...) - } - status.printCertificatesStatus() - status.printBackupStatus() - status.printBasebackupStatus() - status.printReplicaStatus(verbose) - status.printUnmanagedReplicationSlotStatus() - status.printRoleManagerStatus() - status.printTablespacesStatus() - status.printPodDisruptionBudgetStatus() - status.printInstancesStatus() + if verbosity > 1 { + errs = append(errs, status.printPostgresConfiguration(ctx, clientInterface)...) + status.printCertificatesStatus() + } + if !hibernated { + status.printBackupStatus() + status.printBasebackupStatus(verbosity) + status.printReplicaStatus(verbosity) + if verbosity > 0 { + status.printUnmanagedReplicationSlotStatus() + status.printRoleManagerStatus() + status.printTablespacesStatus() + status.printPodDisruptionBudgetStatus() + } + status.printInstancesStatus() + } + status.printPluginStatus(verbosity) if len(errs) > 0 { fmt.Println() @@ -154,6 +178,7 @@ func extractPostgresqlStatus(ctx context.Context, cluster apiv1.Cluster) *Postgr // Get the list of Pods created by this Cluster instancesStatus, errList := resources.ExtractInstancesStatus( ctx, + &cluster, plugin.Config, managedPods, ) @@ -188,22 +213,39 @@ func listFencedInstances(fencedInstances *stringset.Data) string { return strings.Join(fencedInstances.ToList(), ", ") } -func (fullStatus *PostgresqlStatus) printBasicInfo() { +func (fullStatus *PostgresqlStatus) getClusterSize(ctx context.Context, client kubernetes.Interface) (string, error) { + timeout := time.Second * 10 + + // Compute the disk space through `du` + output, _, err := utils.ExecCommand( + ctx, + client, + plugin.Config, + fullStatus.PrimaryPod, + specs.PostgresContainerName, + &timeout, + "du", + "-sLh", + specs.PgDataPath) + if err != nil { + return "", err + } + + size, _, _ := strings.Cut(output, "\t") + return size, nil +} + +func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClient kubernetes.Interface) { summary := tabby.New() - cluster := fullStatus.Cluster + clusterSize, clusterSizeErr := fullStatus.getClusterSize(ctx, k8sClient) - if cluster.IsReplica() { - fmt.Println(aurora.Yellow("Replica Cluster Summary")) - } else { - fmt.Println(aurora.Green("Cluster Summary")) - } + cluster := fullStatus.Cluster primaryInstance := cluster.Status.CurrentPrimary - if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { - primaryInstance = fmt.Sprintf("%v (switching to %v)", - cluster.Status.CurrentPrimary, cluster.Status.TargetPrimary) - } + + // Determine if the cluster is hibernated + hibernated, _ := isHibernated(fullStatus) fencedInstances, err := utils.GetFencedInstances(cluster.Annotations) if err != nil { @@ -212,12 +254,17 @@ func (fullStatus *PostgresqlStatus) printBasicInfo() { isPrimaryFenced := cluster.IsInstanceFenced(cluster.Status.CurrentPrimary) primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() - summary.AddLine("Name:", cluster.Name) - summary.AddLine("Namespace:", cluster.Namespace) + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + primaryInstance = fmt.Sprintf("%v (switching to %v)", + cluster.Status.CurrentPrimary, 
cluster.Status.TargetPrimary) + } + + summary.AddLine("Name", client.ObjectKeyFromObject(cluster).String()) + if primaryInstanceStatus != nil { summary.AddLine("System ID:", primaryInstanceStatus.SystemID) } - summary.AddLine("PostgreSQL Image:", cluster.GetImageName()) + summary.AddLine("PostgreSQL Image:", cluster.Status.Image) if cluster.IsReplica() { summary.AddLine("Designated primary:", primaryInstance) summary.AddLine("Source cluster: ", cluster.Spec.ReplicaCluster.Source) @@ -225,12 +272,20 @@ func (fullStatus *PostgresqlStatus) printBasicInfo() { summary.AddLine("Primary instance:", primaryInstance) } - primaryStartTime := getPrimaryStartTime(cluster) - if len(primaryStartTime) > 0 { - summary.AddLine("Primary start time:", primaryStartTime) + switch { + case hibernated: + summary.AddLine("Status:", aurora.Red("Hibernated")) + case isPrimaryFenced: + summary.AddLine("Status:", aurora.Red("Primary instance is fenced")) + default: + // Avoid printing the start time when hibernated or fenced + primaryStartTime := getPrimaryStartTime(cluster) + if len(primaryStartTime) > 0 { + summary.AddLine("Primary start time:", primaryStartTime) + } + summary.AddLine("Status:", fullStatus.getStatus(cluster)) } - summary.AddLine("Status:", fullStatus.getStatus(isPrimaryFenced, cluster)) if cluster.Spec.Instances == cluster.Status.Instances { summary.AddLine("Instances:", aurora.Green(cluster.Spec.Instances)) } else { @@ -250,13 +305,19 @@ func (fullStatus *PostgresqlStatus) printBasicInfo() { } } - if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { - if cluster.Status.CurrentPrimary == "" { - fmt.Println(aurora.Red("Primary server is initializing")) - } else { - fmt.Println(aurora.Red("Switchover in progress")) + if clusterSizeErr != nil { + switch { + case hibernated: + summary.AddLine("Size:", "- (hibernated)") + case isPrimaryFenced: + summary.AddLine("Size:", "- (fenced)") + default: + summary.AddLine("Size:", aurora.Red(clusterSizeErr.Error())) } + } else { + summary.AddLine("Size:", clusterSize) } + if !cluster.IsReplica() && primaryInstanceStatus != nil { lsnInfo := fmt.Sprintf( "%s (Timeline: %d - WAL File: %s)", @@ -267,29 +328,38 @@ func (fullStatus *PostgresqlStatus) printBasicInfo() { summary.AddLine("Current Write LSN:", lsnInfo) } + if cluster.IsReplica() { + fmt.Println(aurora.Yellow("Replica Cluster Summary")) + } else { + fmt.Println(aurora.Green("Cluster Summary")) + } + + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + if cluster.Status.CurrentPrimary == "" { + fmt.Println(aurora.Red("Primary server is initializing")) + } else { + fmt.Println(aurora.Red("Switchover in progress")) + } + } + summary.Print() fmt.Println() } func (fullStatus *PostgresqlStatus) printHibernationInfo() { - cluster := fullStatus.Cluster - - hibernationCondition := meta.FindStatusCondition( - cluster.Status.Conditions, - hibernation.HibernationConditionType, - ) + hibernated, hibernationCondition := isHibernated(fullStatus) if hibernationCondition == nil { return } hibernationStatus := tabby.New() - if hibernationCondition.Status == metav1.ConditionTrue { + if hibernated { hibernationStatus.AddLine("Status", "Hibernated") } else { hibernationStatus.AddLine("Status", "Active") } hibernationStatus.AddLine("Message", hibernationCondition.Message) - hibernationStatus.AddLine("Time", hibernationCondition.LastTransitionTime.Time.UTC()) + hibernationStatus.AddLine("Time", hibernationCondition.LastTransitionTime.UTC()) fmt.Println(aurora.Green("Hibernation")) 
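+ // Hibernation is surfaced through a standard Kubernetes condition; a
+ // minimal sketch of the check performed by the isHibernated helper
+ // introduced below (assuming cluster.Status.Conditions is populated):
+ //
+ //	cond := meta.FindStatusCondition(cluster.Status.Conditions, hibernation.HibernationConditionType)
+ //	hibernated := cond != nil && cond.Status == metav1.ConditionTrue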
hibernationStatus.Print() @@ -297,6 +367,20 @@ func (fullStatus *PostgresqlStatus) printHibernationInfo() { fmt.Println() } +func isHibernated(fullStatus *PostgresqlStatus) (bool, *metav1.Condition) { + cluster := fullStatus.Cluster + hibernationCondition := meta.FindStatusCondition( + cluster.Status.Conditions, + hibernation.HibernationConditionType, + ) + + if hibernationCondition == nil || hibernationCondition.Status != metav1.ConditionTrue { + return false, hibernationCondition + } + + return true, hibernationCondition +} + func (fullStatus *PostgresqlStatus) printTokenStatus(token string) { primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() @@ -380,11 +464,7 @@ func (fullStatus *PostgresqlStatus) printPromotionTokenInfo() { fmt.Println() } -func (fullStatus *PostgresqlStatus) getStatus(isPrimaryFenced bool, cluster *apiv1.Cluster) string { - if isPrimaryFenced { - return fmt.Sprintf("%v", aurora.Red("Primary instance is fenced")) - } - +func (fullStatus *PostgresqlStatus) getStatus(cluster *apiv1.Cluster) string { switch cluster.Status.Phase { case apiv1.PhaseHealthy, apiv1.PhaseFirstPrimary, apiv1.PhaseCreatingReplica: return fmt.Sprintf("%v %v", aurora.Green(cluster.Status.Phase), cluster.Status.PhaseReason) @@ -395,13 +475,15 @@ func (fullStatus *PostgresqlStatus) getStatus(isPrimaryFenced bool, cluster *api } } -func (fullStatus *PostgresqlStatus) printPostgresConfiguration(ctx context.Context) []error { +func (fullStatus *PostgresqlStatus) printPostgresConfiguration( + ctx context.Context, + client kubernetes.Interface, +) []error { timeout := time.Second * 10 - clientInterface := kubernetes.NewForConfigOrDie(plugin.Config) var errs []error // Read PostgreSQL configuration from custom.conf - customConf, _, err := utils.ExecCommand(ctx, clientInterface, plugin.Config, fullStatus.PrimaryPod, + customConf, _, err := utils.ExecCommand(ctx, client, plugin.Config, fullStatus.PrimaryPod, specs.PostgresContainerName, &timeout, "cat", @@ -411,7 +493,7 @@ func (fullStatus *PostgresqlStatus) printPostgresConfiguration(ctx context.Conte } // Read PostgreSQL HBA Rules from pg_hba.conf - pgHBAConf, _, err := utils.ExecCommand(ctx, clientInterface, plugin.Config, fullStatus.PrimaryPod, + pgHBAConf, _, err := utils.ExecCommand(ctx, client, plugin.Config, fullStatus.PrimaryPod, specs.PostgresContainerName, &timeout, "cat", path.Join(specs.PgDataPath, constants.PostgresqlHBARulesFile)) if err != nil { @@ -439,7 +521,7 @@ func (fullStatus *PostgresqlStatus) printBackupStatus() { return } status := tabby.New() - FPoR := cluster.Status.FirstRecoverabilityPoint + FPoR := cluster.Status.FirstRecoverabilityPoint //nolint:staticcheck if FPoR == "" { FPoR = "Not Available" } @@ -488,9 +570,9 @@ func (fullStatus *PostgresqlStatus) areReplicationSlotsEnabled() bool { fullStatus.Cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() } -func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.Tabby, verbose bool) { +func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.Tabby, verbosity int) { switch { - case fullStatus.areReplicationSlotsEnabled() && verbose: + case fullStatus.areReplicationSlotsEnabled() && verbosity > 0: table.AddHeader( "Name", "Sent LSN", @@ -508,7 +590,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.T "Slot WAL Status", "Slot Safe WAL Size", ) - case fullStatus.areReplicationSlotsEnabled() && !verbose: + case fullStatus.areReplicationSlotsEnabled() && verbosity == 0: table.AddHeader( "Name", 
"Sent LSN", @@ -544,7 +626,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.T func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns( applicationName string, columns *[]interface{}, - verbose bool, + verbosity int, ) { printSlotActivity := func(isActive bool) string { if isActive { @@ -554,18 +636,18 @@ func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns( } slot := fullStatus.getPrintableReplicationSlotInfo(applicationName) switch { - case slot != nil && verbose: + case slot != nil && verbosity > 0: *columns = append(*columns, printSlotActivity(slot.Active), slot.RestartLsn, slot.WalStatus, getPrintableIntegerPointer(slot.SafeWalSize), ) - case slot != nil && !verbose: + case slot != nil && verbosity == 0: *columns = append(*columns, printSlotActivity(slot.Active), ) - case slot == nil && verbose: + case slot == nil && verbosity > 0: *columns = append(*columns, "-", "-", @@ -579,7 +661,7 @@ func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns( } } -func (fullStatus *PostgresqlStatus) printReplicaStatus(verbose bool) { +func (fullStatus *PostgresqlStatus) printReplicaStatus(verbosity int) { if fullStatus.Cluster.IsReplica() { return } @@ -605,17 +687,17 @@ func (fullStatus *PostgresqlStatus) printReplicaStatus(verbose bool) { } if fullStatus.areReplicationSlotsEnabled() { - fmt.Println(aurora.Yellow("Replication Slots Enabled").String()) + fmt.Println(aurora.Green("Replication Slots Enabled").String()) } status := tabby.New() - fullStatus.printReplicaStatusTableHeader(status, verbose) + fullStatus.printReplicaStatusTableHeader(status, verbosity) // print Replication Slots columns only if the cluster has replication slots enabled addReplicationSlotsColumns := func(_ string, _ *[]interface{}) {} if fullStatus.areReplicationSlotsEnabled() { addReplicationSlotsColumns = func(applicationName string, columns *[]interface{}) { - fullStatus.addReplicationSlotsColumns(applicationName, columns, verbose) + fullStatus.addReplicationSlotsColumns(applicationName, columns, verbosity) } } @@ -656,13 +738,13 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { // else: // if it is paused, print "Standby (paused)" // else if SyncState = sync/quorum print "Standby (sync)" + // else if SyncState = potential print "Standby (potential sync)" // else print "Standby (async)" status := tabby.New() fmt.Println(aurora.Green("Instances status")) status.AddHeader( "Name", - "Database Size", "Current LSN", // For standby use "Replay LSN" "Replication role", "Status", @@ -677,7 +759,6 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { instance.Pod.Name, "-", "-", - "-", apierrs.ReasonForError(instance.Error), instance.Pod.Status.QOSClass, "-", @@ -693,7 +774,6 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { replicaRole := getReplicaRole(instance, fullStatus) status.AddLine( instance.Pod.Name, - instance.TotalInstanceSize, getCurrentLSN(instance), replicaRole, statusMsg, @@ -704,6 +784,7 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { continue } status.Print() + fmt.Println() } func (fullStatus *PostgresqlStatus) printCertificatesStatus() { @@ -813,6 +894,8 @@ func getReplicaRole(instance postgres.PostgresqlStatus, fullStatus *PostgresqlSt switch state.SyncState { case "quorum", "sync": return "Standby (sync)" + case "potential": + return "Standby (potential sync)" case "async": return "Standby (async)" default: @@ -935,7 +1018,7 @@ func (fullStatus *PostgresqlStatus) printPodDisruptionBudgetStatus() { 
fmt.Println() } -func (fullStatus *PostgresqlStatus) printBasebackupStatus() { +func (fullStatus *PostgresqlStatus) printBasebackupStatus(verbosity int) { const header = "Physical backups" primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() @@ -947,9 +1030,11 @@ func (fullStatus *PostgresqlStatus) printBasebackupStatus() { } if len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 { - fmt.Println(aurora.Green(header)) - fmt.Println(aurora.Yellow("No running physical backups found").String()) - fmt.Println() + if verbosity > 0 { + fmt.Println(aurora.Green(header)) + fmt.Println(aurora.Yellow("No running physical backups found").String()) + fmt.Println() + } return } @@ -1098,6 +1183,66 @@ func (fullStatus *PostgresqlStatus) printTablespacesStatus() { fmt.Println() } +func (fullStatus *PostgresqlStatus) printPluginStatus(verbosity int) { + const header = "Plugins status" + + parseCapabilities := func(capabilities []string) string { + if len(capabilities) == 0 { + return "N/A" + } + + result := make([]string, len(capabilities)) + for idx, capability := range capabilities { + switch capability { + case identity.PluginCapability_Service_TYPE_BACKUP_SERVICE.String(): + result[idx] = "Backup Service" + case identity.PluginCapability_Service_TYPE_RESTORE_JOB.String(): + result[idx] = "Restore Job" + case identity.PluginCapability_Service_TYPE_RECONCILER_HOOKS.String(): + result[idx] = "Reconciler Hooks" + case identity.PluginCapability_Service_TYPE_WAL_SERVICE.String(): + result[idx] = "WAL Service" + case identity.PluginCapability_Service_TYPE_OPERATOR_SERVICE.String(): + result[idx] = "Operator Service" + case identity.PluginCapability_Service_TYPE_LIFECYCLE_SERVICE.String(): + result[idx] = "Lifecycle Service" + case identity.PluginCapability_Service_TYPE_POSTGRES.String(): + result[idx] = "Postgres Service" + case identity.PluginCapability_Service_TYPE_UNSPECIFIED.String(): + continue + default: + result[idx] = capability + } + } + + return strings.Join(result, ", ") + } + + if len(fullStatus.Cluster.Status.PluginStatus) == 0 { + if verbosity > 0 { + fmt.Println(aurora.Green(header)) + fmt.Println("No plugins found") + } + return + } + + fmt.Println(aurora.Green(header)) + + status := tabby.New() + status.AddHeader("Name", "Version", "Status", "Reported Operator Capabilities") + + for _, plg := range fullStatus.Cluster.Status.PluginStatus { + plgStatus := "N/A" + if plg.Status != "" { + plgStatus = plg.Status + } + status.AddLine(plg.Name, plg.Version, plgStatus, parseCapabilities(plg.Capabilities)) + } + + status.Print() + fmt.Println() +} + func getPrimaryStartTime(cluster *apiv1.Cluster) string { return getPrimaryStartTimeIdempotent(cluster, time.Now()) } diff --git a/internal/cmd/plugin/status/status_test.go b/internal/cmd/plugin/status/status_test.go index d39a949924..05ae1688a0 100644 --- a/internal/cmd/plugin/status/status_test.go +++ b/internal/cmd/plugin/status/status_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package status diff --git a/internal/cmd/plugin/status/suite_test.go b/internal/cmd/plugin/status/suite_test.go index 23daf63ee5..2586fc4b6e 100644 --- a/internal/cmd/plugin/status/suite_test.go +++ b/internal/cmd/plugin/status/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package status diff --git a/internal/cmd/plugin/suite_test.go b/internal/cmd/plugin/suite_test.go index 4628a4efab..c10994180d 100644 --- a/internal/cmd/plugin/suite_test.go +++ b/internal/cmd/plugin/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin @@ -48,8 +51,6 @@ func TestPlugin(t *testing.T) { } var _ = BeforeSuite(func() { - By("bootstrapping test environment") - if os.Getenv("USE_EXISTING_CLUSTER") == "true" { By("using existing config for test environment") testEnv = &envtest.Environment{} @@ -65,6 +66,12 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) + DeferCleanup(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + }) + err = apiv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) @@ -74,9 +81,3 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) }) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/internal/cmd/versions/cmd.go b/internal/cmd/versions/cmd.go index 498457dcd5..4fc872cb09 100644 --- a/internal/cmd/versions/cmd.go +++ b/internal/cmd/versions/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package versions builds the version subcommand for both manager and plugins diff --git a/internal/cnpi/plugin/client/backup.go b/internal/cnpi/plugin/client/backup.go index f2a7a211ba..6103462380 100644 --- a/internal/cnpi/plugin/client/backup.go +++ b/internal/cnpi/plugin/client/backup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client @@ -19,7 +22,6 @@ package client import ( "context" "encoding/json" - "errors" "fmt" "slices" "time" @@ -30,20 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var ( - // ErrPluginNotLoaded is raised when the plugin that should manage the backup - // have not been loaded inside the cluster - ErrPluginNotLoaded = errors.New("plugin not loaded") - - // ErrPluginNotSupportBackup is raised when the plugin that should manage the backup - // doesn't support the Backup service - ErrPluginNotSupportBackup = errors.New("plugin does not support Backup service") - - // ErrPluginNotSupportBackupEndpoint is raised when the plugin that should manage the backup - // doesn't support the Backup RPC endpoint - ErrPluginNotSupportBackupEndpoint = errors.New("plugin does not support the Backup RPC call") -) - // BackupResponse is the status of a newly created backup. This is used as a return // type for the Backup RPC Call type BackupResponse struct { @@ -86,6 +74,9 @@ type BackupResponse struct { // This field is set to true for online/hot backups and to false otherwise. Online bool + + // This field contains the metadata to be associated with this backup + Metadata map[string]string } func (data *data) Backup( @@ -94,6 +85,17 @@ func (data *data) Backup( backupObject client.Object, pluginName string, parameters map[string]string, +) (*BackupResponse, error) { + b, err := data.innerBackup(ctx, cluster, backupObject, pluginName, parameters) + return b, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerBackup( + ctx context.Context, + cluster client.Object, + backupObject client.Object, + pluginName string, + parameters map[string]string, ) (*BackupResponse, error) { contextLogger := log.FromContext(ctx) @@ -162,5 +164,6 @@ func (data *data) Backup( TablespaceMapFile: result.TablespaceMapFile, InstanceID: result.InstanceId, Online: result.Online, + Metadata: result.Metadata, }, nil } diff --git a/internal/cnpi/plugin/client/client.go b/internal/cnpi/plugin/client/client.go index 9d62c1a7a7..59f3e4cbd3 100644 --- a/internal/cnpi/plugin/client/client.go +++ b/internal/cnpi/plugin/client/client.go @@ -1,17 +1,20 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client @@ -20,6 +23,7 @@ import ( "context" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" @@ -42,18 +46,6 @@ func (data *data) getPlugin(pluginName string) (connection.Interface, error) { return nil, ErrPluginNotLoaded } -func (data *data) load(ctx context.Context, names ...string) error { - for _, name := range names { - pluginData, err := data.repository.GetConnection(ctx, name) - if err != nil { - return err - } - - data.plugins = append(data.plugins, pluginData) - } - return nil -} - func (data *data) MetadataList() []connection.Metadata { result := make([]connection.Metadata, len(data.plugins)) for i := range data.plugins { @@ -63,6 +55,11 @@ func (data *data) MetadataList() []connection.Metadata { return result } +func (data *data) HasPlugin(pluginName string) bool { + _, err := data.getPlugin(pluginName) + return err == nil +} + func (data *data) Close(ctx context.Context) { contextLogger := log.FromContext(ctx) for i := range data.plugins { @@ -83,7 +80,27 @@ func WithPlugins(ctx context.Context, repository repository.Interface, names ... result := &data{ repository: repository, } - if err := result.load(ctx, names...); err != nil { + + load := func(names ...string) error { + for _, name := range names { + pluginData, err := result.repository.GetConnection(ctx, name) + if err != nil { + return err + } + + result.plugins = append(result.plugins, pluginData) + } + return nil + } + + // The following ensures that each plugin is loaded just once, + // even when the same plugin has been requested multiple times. + loadingPlugins := stringset.From(names) + uniqueSortedPluginName := loadingPlugins.ToSortedList() + + if err := load(uniqueSortedPluginName...); err != nil { + result.Close(ctx) return nil, err } diff --git a/internal/cnpi/plugin/client/cluster.go b/internal/cnpi/plugin/client/cluster.go index c5ae9b4456..6429cb8ba7 100644 --- a/internal/cnpi/plugin/client/cluster.go +++ b/internal/cnpi/plugin/client/cluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package client @@ -19,7 +22,6 @@ package client import ( "context" "encoding/json" - "errors" "fmt" "slices" @@ -31,6 +33,11 @@ import ( ) func (data *data) MutateCluster(ctx context.Context, object client.Object, mutatedObject client.Object) error { + err := data.innerMutateCluster(ctx, object, mutatedObject) + return wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerMutateCluster(ctx context.Context, object client.Object, mutatedObject client.Object) error { contextLogger := log.FromContext(ctx) serializedObject, err := json.Marshal(object) @@ -93,11 +100,16 @@ func (data *data) MutateCluster(ctx context.Context, object client.Object, mutat } var ( - errInvalidJSON = errors.New("invalid json") - errSetStatusInCluster = errors.New("SetStatusInCluster invocation failed") + errInvalidJSON = newPluginError("invalid json") + errSetStatusInCluster = newPluginError("SetStatusInCluster invocation failed") ) func (data *data) SetStatusInCluster(ctx context.Context, cluster client.Object) (map[string]string, error) { + m, err := data.innerSetStatusInCluster(ctx, cluster) + return m, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerSetStatusInCluster(ctx context.Context, cluster client.Object) (map[string]string, error) { contextLogger := log.FromContext(ctx) serializedObject, err := json.Marshal(cluster) if err != nil { @@ -147,6 +159,14 @@ func (data *data) SetStatusInCluster(ctx context.Context, cluster client.Object) func (data *data) ValidateClusterCreate( ctx context.Context, object client.Object, +) (field.ErrorList, error) { + result, err := data.innerValidateClusterCreate(ctx, object) + return result, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerValidateClusterCreate( + ctx context.Context, + object client.Object, ) (field.ErrorList, error) { contextLogger := log.FromContext(ctx) @@ -189,6 +209,15 @@ func (data *data) ValidateClusterUpdate( ctx context.Context, oldObject client.Object, newObject client.Object, +) (field.ErrorList, error) { + result, err := data.innerValidateClusterUpdate(ctx, oldObject, newObject) + return result, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerValidateClusterUpdate( + ctx context.Context, + oldObject client.Object, + newObject client.Object, ) (field.ErrorList, error) { contextLogger := log.FromContext(ctx) diff --git a/internal/cnpi/plugin/client/cluster_test.go b/internal/cnpi/plugin/client/cluster_test.go index 2d574c70f4..f2b8380257 100644 --- a/internal/cnpi/plugin/client/cluster_test.go +++ b/internal/cnpi/plugin/client/cluster_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client @@ -22,7 +25,6 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/operator" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" . 
"github.com/onsi/ginkgo/v2" @@ -33,9 +35,9 @@ var _ = Describe("SetStatusInCluster", func() { const pluginName = "fake-plugin" const pluginName2 = "fake-plugin2" - var cluster *apiv1.Cluster + var cluster fakeCluster BeforeEach(func() { - cluster = &apiv1.Cluster{} + cluster = fakeCluster{} }) It("should correctly set the status of a single plugin", func(ctx SpecContext) { diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go index 1489181fb6..03bf7fe32d 100644 --- a/internal/cnpi/plugin/client/contracts.go +++ b/internal/cnpi/plugin/client/contracts.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client @@ -19,12 +22,14 @@ package client import ( "context" + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) // Client describes a set of behaviour needed to properly handle all the plugin client expected features @@ -35,6 +40,29 @@ type Client interface { LifecycleCapabilities WalCapabilities BackupCapabilities + RestoreJobHooksCapabilities + PostgresConfigurationCapabilities + MetricsCapabilities +} + +// SetPluginClientInContext records the plugin client in the given context +func SetPluginClientInContext(ctx context.Context, client Client) context.Context { + return context.WithValue(ctx, contextutils.PluginClientKey, client) +} + +// GetPluginClientFromContext gets the current plugin client from the context +func GetPluginClientFromContext(ctx context.Context) Client { + v := ctx.Value(contextutils.PluginClientKey) + if v == nil { + return nil + } + + cli, ok := v.(Client) + if !ok { + return nil + } + + return cli } // Connection describes a set of behaviour needed to properly handle the plugin connections @@ -44,6 +72,8 @@ type Connection interface { // MetadataList exposes the metadata of the loaded plugins MetadataList() []connection.Metadata + + HasPlugin(pluginName string) bool } // ClusterCapabilities describes a set of behaviour needed to implement the Cluster capabilities @@ -77,9 +107,10 @@ type ClusterCapabilities interface { // ReconcilerHookResult is the result of a reconciliation loop type ReconcilerHookResult struct { - Result ctrl.Result - Err error - StopReconciliation bool + Result ctrl.Result `json:"result"` + Err error `json:"err"` + StopReconciliation bool `json:"stopReconciliation"` + Identifier string `json:"identifier"` } // ClusterReconcilerHooks describes a set of behavior needed to enhance @@ -122,13 +153,14 @@ type WalCapabilities interface { ) error // RestoreWAL calls the loaded plugins to restore a WAL file.
- // This call is a no-op if there's no plugin implementing WAL archiving + // This call returns a boolean indicating whether the WAL was restored + // by a plugin, along with any error that occurred. RestoreWAL( ctx context.Context, cluster client.Object, sourceWALName string, destinationFileName string, - ) error + ) (bool, error) } // BackupCapabilities describes a set of behaviour needed to backup @@ -143,3 +175,8 @@ type BackupCapabilities interface { parameters map[string]string, ) (*BackupResponse, error) } + +// RestoreJobHooksCapabilities describes a set of behaviour needed to run the Restore +type RestoreJobHooksCapabilities interface { + Restore(ctx context.Context, cluster gvkEnsurer) (*restore.RestoreResponse, error) +} diff --git a/internal/cnpi/plugin/client/create.go b/internal/cnpi/plugin/client/create.go new file mode 100644 index 0000000000..fc08a0f707 --- /dev/null +++ b/internal/cnpi/plugin/client/create.go @@ -0,0 +1,57 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" +) + +// NewClient creates a new CNPI client +func NewClient(ctx context.Context, enabledPlugin *stringset.Data) (Client, error) { + cli, err := innerNewClient(ctx, enabledPlugin) + return cli, wrapAsPluginErrorIfNeeded(err) +} + +func innerNewClient(ctx context.Context, enabledPlugin *stringset.Data) (Client, error) { + contextLogger := log.FromContext(ctx) + plugins := repository.New() + + // TODO: make the socketDir a parameter + availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) + if err != nil { + contextLogger.Error(err, "Error while loading local plugins") + plugins.Close() + return nil, err + } + + availablePluginNamesSet := stringset.From(availablePluginNames) + availableAndEnabled := stringset.From(availablePluginNamesSet.Intersect(enabledPlugin).ToList()) + return WithPlugins( + ctx, + plugins, + availableAndEnabled.ToList()..., + ) +} diff --git a/internal/cnpi/plugin/client/doc.go b/internal/cnpi/plugin/client/doc.go index 1cb0e5ee6d..e0285d00df 100644 --- a/internal/cnpi/plugin/client/doc.go +++ b/internal/cnpi/plugin/client/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package client contains a set of helper structures for CNPG to use the diff --git a/internal/cnpi/plugin/client/errors.go b/internal/cnpi/plugin/client/errors.go new file mode 100644 index 0000000000..ad99ed2bca --- /dev/null +++ b/internal/cnpi/plugin/client/errors.go @@ -0,0 +1,73 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import "errors" + +var ( + // ErrPluginNotLoaded is raised when the plugin that should manage the backup + // has not been loaded inside the cluster + ErrPluginNotLoaded = newPluginError("plugin not loaded") + + // ErrPluginNotSupportBackup is raised when the plugin that should manage the backup + // doesn't support the Backup service + ErrPluginNotSupportBackup = newPluginError("plugin does not support Backup service") + + // ErrPluginNotSupportBackupEndpoint is raised when the plugin that should manage the backup + // doesn't support the Backup RPC endpoint + ErrPluginNotSupportBackupEndpoint = newPluginError("plugin does not support the Backup RPC call") +) + +type pluginError struct { + innerErr error +} + +func (e *pluginError) Error() string { + return e.innerErr.Error() +} + +func (e *pluginError) Unwrap() error { + return e.innerErr +} + +func newPluginError(msg string) error { + return &pluginError{innerErr: errors.New(msg)} +} + +// ContainsPluginError checks if the provided error chain contains a plugin error. +func ContainsPluginError(err error) bool { + if err == nil { + return false + } + + var pluginErr *pluginError + return errors.As(err, &pluginErr) +} + +func wrapAsPluginErrorIfNeeded(err error) error { + if err == nil { + return nil + } + if ContainsPluginError(err) { + return err + } + + return &pluginError{innerErr: err} +} diff --git a/internal/cnpi/plugin/client/errors_test.go b/internal/cnpi/plugin/client/errors_test.go new file mode 100644 index 0000000000..9c68a3da19 --- /dev/null +++ b/internal/cnpi/plugin/client/errors_test.go @@ -0,0 +1,122 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "errors" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("wrapAsPluginErrorIfNeeded", func() { + It("should return nil when err is nil", func() { + result := wrapAsPluginErrorIfNeeded(nil) + Expect(result).ToNot(HaveOccurred()) + }) + + It("should return the same error when it already contains a plugin error", func() { + originalErr := newPluginError("original plugin error") + wrappedErr := fmt.Errorf("wrapped: %w", originalErr) + + result := wrapAsPluginErrorIfNeeded(wrappedErr) + Expect(result).To(Equal(wrappedErr)) + Expect(ContainsPluginError(result)).To(BeTrue()) + }) + + It("should wrap a non-plugin error as a plugin error", func() { + originalErr := errors.New("some regular error") + + result := wrapAsPluginErrorIfNeeded(originalErr) + Expect(result).ToNot(Equal(originalErr)) + Expect(ContainsPluginError(result)).To(BeTrue()) + Expect(result.Error()).To(Equal(originalErr.Error())) + }) + + It("should wrap a nested non-plugin error as a plugin error", func() { + originalErr := errors.New("base error") + wrappedErr := fmt.Errorf("context: %w", originalErr) + + result := wrapAsPluginErrorIfNeeded(wrappedErr) + Expect(result).ToNot(Equal(wrappedErr)) + Expect(ContainsPluginError(result)).To(BeTrue()) + Expect(result.Error()).To(Equal(wrappedErr.Error())) + Expect(errors.Is(result, originalErr)).To(BeTrue()) + }) + + It("should not double-wrap an already wrapped plugin error", func() { + originalErr := errors.New("base error") + pluginErr := &pluginError{innerErr: originalErr} + wrappedPluginErr := fmt.Errorf("additional context: %w", pluginErr) + + result := wrapAsPluginErrorIfNeeded(wrappedPluginErr) + Expect(result).To(Equal(wrappedPluginErr)) + Expect(ContainsPluginError(result)).To(BeTrue()) + }) +}) + +var _ = Describe("ContainsPluginError", func() { + It("should return false when err is nil", func() { + result := ContainsPluginError(nil) + Expect(result).To(BeFalse()) + }) + + It("should return true when error is a direct plugin error", func() { + pluginErr := newPluginError("test plugin error") + + result := ContainsPluginError(pluginErr) + Expect(result).To(BeTrue()) + }) + + It("should return true when plugin error is wrapped with fmt.Errorf", func() { + pluginErr := newPluginError("original plugin error") + wrappedErr := fmt.Errorf("context: %w", pluginErr) + + result := ContainsPluginError(wrappedErr) + Expect(result).To(BeTrue()) + }) + + It("should return true when plugin error is deeply nested", func() { + pluginErr := newPluginError("base plugin error") + wrappedOnce := fmt.Errorf("level 1: %w", pluginErr) + wrappedTwice := fmt.Errorf("level 2: %w", wrappedOnce) + + result := ContainsPluginError(wrappedTwice) + Expect(result).To(BeTrue()) + }) + + It("should return false when error chain contains no plugin error", func() { + baseErr := errors.New("base error") + wrappedErr := fmt.Errorf("wrapped: %w", baseErr) + + result := ContainsPluginError(wrappedErr) + Expect(result).To(BeFalse()) + }) + + It("should return true when error chain contains plugin error mixed with other errors", func() { + baseErr := errors.New("base error") + pluginErr := &pluginError{innerErr: baseErr} + wrappedErr := fmt.Errorf("additional context: %w", pluginErr) + + result := ContainsPluginError(wrappedErr) + Expect(result).To(BeTrue()) + }) +}) diff --git a/internal/cnpi/plugin/client/lifecycle.go b/internal/cnpi/plugin/client/lifecycle.go index 3e47d37ff4..1ba289e890 100644 --- a/internal/cnpi/plugin/client/lifecycle.go +++ b/internal/cnpi/plugin/client/lifecycle.go @@ -1,5 +1,6 @@ /* -Copyright The 
CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client @@ -46,6 +49,16 @@ func (data *data) LifecycleHook( operationType plugin.OperationVerb, cluster client.Object, object client.Object, +) (client.Object, error) { + obj, err := data.innerLifecycleHook(ctx, operationType, cluster, object) + return obj, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerLifecycleHook( + ctx context.Context, + operationType plugin.OperationVerb, + cluster client.Object, + object client.Object, ) (client.Object, error) { contextLogger := log.FromContext(ctx).WithName("lifecycle_hook") diff --git a/internal/cnpi/plugin/client/lifecycle_test.go b/internal/cnpi/plugin/client/lifecycle_test.go index dd5d38ec1a..bcff9384bd 100644 --- a/internal/cnpi/plugin/client/lifecycle_test.go +++ b/internal/cnpi/plugin/client/lifecycle_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/metrics.go b/internal/cnpi/plugin/client/metrics.go new file mode 100644 index 0000000000..765c221f6b --- /dev/null +++ b/internal/cnpi/plugin/client/metrics.go @@ -0,0 +1,145 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "encoding/json" + "errors" + "slices" + + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MetricsCapabilities defines the interface for plugins that can provide metrics capabilities. +type MetricsCapabilities interface { + // GetMetricsDefinitions retrieves the definitions of the metrics that will be collected from the plugins. + GetMetricsDefinitions(ctx context.Context, cluster client.Object) (PluginMetricDefinitions, error) + // CollectMetrics collects the metrics from the plugins. 
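+ // A hedged sketch of how a caller might combine the two calls; the
+ // sample field name FqName is an assumption about the cnpg-i
+ // CollectMetric message, not something defined in this change:
+ //
+ //	defs, _ := c.GetMetricsDefinitions(ctx, cluster)
+ //	samples, _ := c.CollectMetrics(ctx, cluster)
+ //	for _, s := range samples {
+ //		def := defs.Get(s.FqName) // nil when no definition matches
+ //		_ = def                   // e.g. emit a metric from def.Desc and def.ValueType
+ //	}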
+ CollectMetrics(ctx context.Context, cluster client.Object) ([]*metrics.CollectMetric, error) +} + +// PluginMetricDefinitions is a slice of PluginMetricDefinition, representing the metrics definitions returned +// by plugins. +type PluginMetricDefinitions []PluginMetricDefinition + +// Get returns the PluginMetricDefinition with the given fully qualified name (FqName); it returns nil if not found. +func (p PluginMetricDefinitions) Get(fqName string) *PluginMetricDefinition { + for _, metric := range p { + if metric.FqName == fqName { + return &metric + } + } + + return nil +} + +// PluginMetricDefinition represents a metric definition returned by a plugin. +type PluginMetricDefinition struct { + FqName string + ValueType prometheus.ValueType + Desc *prometheus.Desc +} + +func (data *data) GetMetricsDefinitions( + ctx context.Context, + cluster client.Object, +) (PluginMetricDefinitions, error) { + contextLogger := log.FromContext(ctx).WithName("plugin_metrics_definitions") + + clusterDefinition, marshalErr := json.Marshal(cluster) + if marshalErr != nil { + return nil, marshalErr + } + + var results PluginMetricDefinitions + + for idx := range data.plugins { + plugin := data.plugins[idx] + if !slices.Contains(plugin.MetricsCapabilities(), metrics.MetricsCapability_RPC_TYPE_METRICS) { + contextLogger.Debug("skipping plugin", "plugin", plugin.Name()) + continue + } + + res, err := plugin.MetricsClient().Define(ctx, &metrics.DefineMetricsRequest{ClusterDefinition: clusterDefinition}) + if err != nil { + contextLogger.Error(err, "failed to get metrics definitions from plugin", "plugin", plugin.Name()) + return nil, err + } + if res == nil { + err := errors.New("plugin returned nil metrics definitions while having metrics capability") + contextLogger.Error(err, "while invoking metrics definitions", "plugin", plugin.Name()) + return nil, err + } + + contextLogger.Debug("plugin returned metrics definitions", "plugin", plugin.Name(), "metrics", res.Metrics) + for _, element := range res.Metrics { + desc := prometheus.NewDesc(element.FqName, element.Help, element.VariableLabels, element.ConstLabels) + results = append(results, PluginMetricDefinition{ + FqName: element.FqName, + Desc: desc, + ValueType: prometheus.ValueType(element.ValueType.Type), + }) + } + } + + return results, nil +} + +func (data *data) CollectMetrics( + ctx context.Context, + cluster client.Object, +) ([]*metrics.CollectMetric, error) { + contextLogger := log.FromContext(ctx).WithName("plugin_metrics_collect") + + clusterDefinition, marshalErr := json.Marshal(cluster) + if marshalErr != nil { + return nil, marshalErr + } + + var results []*metrics.CollectMetric + + for idx := range data.plugins { + plugin := data.plugins[idx] + if !slices.Contains(plugin.MetricsCapabilities(), metrics.MetricsCapability_RPC_TYPE_METRICS) { + contextLogger.Debug("skipping plugin", "plugin", plugin.Name()) + continue + } + + res, err := plugin.MetricsClient().Collect(ctx, &metrics.CollectMetricsRequest{ClusterDefinition: clusterDefinition}) + if err != nil { + contextLogger.Error(err, "failed to collect metrics from plugin", "plugin", plugin.Name()) + return nil, err + } + if res == nil { + err := errors.New("plugin returned nil metrics while having metrics capability") + contextLogger.Error(err, "while invoking metrics collection", "plugin", plugin.Name()) + return nil, err + } + + contextLogger.Debug("plugin returned metrics", "plugin", plugin.Name(), "metrics", res.Metrics) + results = append(results, res.Metrics...)
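+ // Samples from every capable plugin are appended in plugin order; this
+ // loop performs no de-duplication, so identically named series returned
+ // by two plugins would both be reported.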
+ } + + return results, nil +} diff --git a/internal/cnpi/plugin/client/postgres.go b/internal/cnpi/plugin/client/postgres.go new file mode 100644 index 0000000000..4679cfc4ba --- /dev/null +++ b/internal/cnpi/plugin/client/postgres.go @@ -0,0 +1,93 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "encoding/json" + "slices" + + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "github.com/cloudnative-pg/machinery/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// PostgresConfigurationCapabilities is the interface that defines the +// capabilities of interacting with PostgreSQL. +type PostgresConfigurationCapabilities interface { + // EnrichConfiguration is the method that enriches the PostgreSQL configuration + EnrichConfiguration( + ctx context.Context, + cluster client.Object, + config map[string]string, + operationType postgresClient.OperationType_Type, + ) (map[string]string, error) +} + +func (data *data) EnrichConfiguration( + ctx context.Context, + cluster client.Object, + config map[string]string, + operationType postgresClient.OperationType_Type, +) (map[string]string, error) { + m, err := data.innerEnrichConfiguration(ctx, cluster, config, operationType) + return m, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerEnrichConfiguration( + ctx context.Context, + cluster client.Object, + config map[string]string, + operationType postgresClient.OperationType_Type, +) (map[string]string, error) { + tempConfig := config + + contextLogger := log.FromContext(ctx).WithName("enrichConfiguration") + + clusterDefinition, marshalErr := json.Marshal(cluster) + if marshalErr != nil { + return nil, marshalErr + } + + for idx := range data.plugins { + plugin := data.plugins[idx] + + if !slices.Contains(plugin.PostgresCapabilities(), postgresClient.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION) { + contextLogger.Debug("skipping plugin", "plugin", plugin.Name()) + continue + } + req := &postgresClient.EnrichConfigurationRequest{ + Configs: config, + ClusterDefinition: clusterDefinition, + OperationType: &postgresClient.OperationType{Type: operationType}, + } + res, err := plugin.PostgresClient().EnrichConfiguration(ctx, req) + if err != nil { + return nil, err + } + contextLogger.Debug("received response", "resConfig", res.Configs) + if len(res.Configs) == 0 { + continue + } + tempConfig = res.Configs + } + + return tempConfig, nil +} diff --git a/internal/cnpi/plugin/client/postgres_test.go b/internal/cnpi/plugin/client/postgres_test.go new file mode 100644 index 0000000000..187320fcf4 --- /dev/null +++ b/internal/cnpi/plugin/client/postgres_test.go @@ -0,0 +1,225 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
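The definitions returned by GetMetricsDefinitions earlier in this file set carry ready-made prometheus.Desc values, so a collector can pair them with the raw values coming back from CollectMetrics. A minimal sketch of that bridging, written as if it sat next to the types above; the CollectMetric field names used here (FqName, Value, VariableLabels) are assumptions, since the cnpg-i message layout is not shown in this diff:

```go
package client

import (
	"github.com/cloudnative-pg/cnpg-i/pkg/metrics"
	"github.com/prometheus/client_golang/prometheus"
)

// emitPluginMetrics pairs collected plugin values with their registered
// definitions and pushes them on a Prometheus collect channel, skipping
// values that were never defined.
func emitPluginMetrics(
	defs PluginMetricDefinitions,
	collected []*metrics.CollectMetric,
	ch chan<- prometheus.Metric,
) {
	for _, m := range collected {
		def := defs.Get(m.FqName) // FqName: assumed field
		if def == nil {
			continue
		}
		ch <- prometheus.MustNewConstMetric(
			def.Desc,
			def.ValueType,
			m.Value,             // assumed field
			m.VariableLabels..., // assumed field
		)
	}
}
```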
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "google.golang.org/grpc" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type fakePostgresClient struct { + enrichConfigResponse *postgres.EnrichConfigurationResult + enrichConfigError error +} + +type fakePostgresConnection struct { + name string + capabilities []postgres.PostgresCapability_RPC_Type + postgresClient *fakePostgresClient + connection.Interface +} + +func (f *fakePostgresClient) GetCapabilities( + _ context.Context, + _ *postgres.PostgresCapabilitiesRequest, + _ ...grpc.CallOption, +) (*postgres.PostgresCapabilitiesResult, error) { + return &postgres.PostgresCapabilitiesResult{ + Capabilities: []*postgres.PostgresCapability{ + { + Type: &postgres.PostgresCapability_Rpc{ + Rpc: &postgres.PostgresCapability_RPC{ + Type: postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION, + }, + }, + }, + }, + }, nil +} + +func (f *fakePostgresClient) EnrichConfiguration( + _ context.Context, + _ *postgres.EnrichConfigurationRequest, + _ ...grpc.CallOption, +) (*postgres.EnrichConfigurationResult, error) { + return f.enrichConfigResponse, f.enrichConfigError +} + +func (f *fakePostgresConnection) Name() string { + return f.name +} + +func (f *fakePostgresConnection) PostgresClient() postgres.PostgresClient { + return f.postgresClient +} + +func (f *fakePostgresConnection) PostgresCapabilities() []postgres.PostgresCapability_RPC_Type { + return f.capabilities +} + +var _ = Describe("EnrichConfiguration", func() { + var ( + d *data + cluster *fakeCluster + config map[string]string + ) + + BeforeEach(func() { + config = map[string]string{"key1": "value1"} + d = &data{plugins: []connection.Interface{}} + + cluster = &fakeCluster{} + }) + + It("should successfully enrich configuration", func(ctx SpecContext) { + postgresClient := &fakePostgresClient{ + enrichConfigResponse: &postgres.EnrichConfigurationResult{ + Configs: map[string]string{"key1": "value1", "key2": "value2"}, + }, + } + + plugin := &fakePostgresConnection{ + name: "test-plugin", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient, + } + + d.plugins = append(d.plugins, plugin) + + config, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(HaveKeyWithValue("key1", "value1")) + Expect(config).To(HaveKeyWithValue("key2", "value2")) + }) + + It("should return error when plugin returns error", func(ctx SpecContext) { + expectedErr := newPluginError("plugin error") + + postgresClient := &fakePostgresClient{ + enrichConfigError: expectedErr, + } + + plugin := &fakePostgresConnection{ + name: "test-plugin", + capabilities: 
[]postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient, + } + + d.plugins = append(d.plugins, plugin) + + _, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).To(HaveOccurred()) + Expect(err).To(Equal(expectedErr)) + }) + + It("should skip plugins without required capability", func(ctx SpecContext) { + plugin := &fakePostgresConnection{ + name: "test-plugin", + capabilities: []postgres.PostgresCapability_RPC_Type{}, + } + + d.plugins = append(d.plugins, plugin) + + origMap := cloneMap(config) + + config, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(BeEquivalentTo(origMap)) + }) + + It("should merge configurations from multiple plugins", func(ctx SpecContext) { + postgresClient1 := &fakePostgresClient{ + enrichConfigResponse: &postgres.EnrichConfigurationResult{ + Configs: map[string]string{"key2": "value2"}, + }, + } + + plugin1 := &fakePostgresConnection{ + name: "plugin1", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient1, + } + + postgresClient2 := &fakePostgresClient{ + enrichConfigResponse: &postgres.EnrichConfigurationResult{ + Configs: map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + }, + }, + } + + plugin2 := &fakePostgresConnection{ + name: "plugin2", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient2, + } + + d.plugins = append(d.plugins, plugin1, plugin2) + + config, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(HaveKeyWithValue("key1", "value1")) + Expect(config).To(HaveKeyWithValue("key2", "value2")) + Expect(config).To(HaveKeyWithValue("key3", "value3")) + }) + + It("should overwrite existing config key when plugin returns the same key", func(ctx SpecContext) { + postgresClient := &fakePostgresClient{ + enrichConfigResponse: &postgres.EnrichConfigurationResult{ + Configs: map[string]string{"key1": "overwritten-value"}, + }, + } + + plugin := &fakePostgresConnection{ + name: "test-plugin", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient, + } + + d.plugins = append(d.plugins, plugin) + + config, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(HaveKeyWithValue("key1", "overwritten-value")) + Expect(config).To(HaveLen(1)) + }) +}) + +func cloneMap(original map[string]string) map[string]string { + clone := make(map[string]string, len(original)) + for k, v := range original { + clone[k] = v + } + return clone +} diff --git a/internal/cnpi/plugin/client/reconciler.go b/internal/cnpi/plugin/client/reconciler.go index 8f1909cfd3..cc0eded125 100644 --- a/internal/cnpi/plugin/client/reconciler.go +++ b/internal/cnpi/plugin/client/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client @@ -31,27 +34,34 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" ) +const cnpgOperatorKey = "cnpg-operator" + // newContinueResult returns a result instructing the reconciliation loop // to continue its operation -func newContinueResult() ReconcilerHookResult { return ReconcilerHookResult{} } +func newContinueResult(identifier string) ReconcilerHookResult { + return ReconcilerHookResult{Identifier: identifier} +} // newTerminateResult returns a result instructing the reconciliation loop to stop // reconciliation -func newTerminateResult() ReconcilerHookResult { return ReconcilerHookResult{StopReconciliation: true} } +func newTerminateResult(identifier string) ReconcilerHookResult { + return ReconcilerHookResult{StopReconciliation: true, Identifier: identifier} +} // newReconcilerRequeueResult creates a new result instructing // a reconciler to schedule a loop in the passed time frame -func newReconcilerRequeueResult(after int64) ReconcilerHookResult { +func newReconcilerRequeueResult(identifier string, after int64) ReconcilerHookResult { return ReconcilerHookResult{ Err: nil, StopReconciliation: true, Result: ctrl.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, + Identifier: identifier, } } // newReconcilerErrorResult creates a new result from an error -func newReconcilerErrorResult(err error) ReconcilerHookResult { - return ReconcilerHookResult{Err: err, StopReconciliation: true} +func newReconcilerErrorResult(identifier string, err error) ReconcilerHookResult { + return ReconcilerHookResult{Err: wrapAsPluginErrorIfNeeded(err), StopReconciliation: true, Identifier: identifier} } func (data *data) PreReconcile(ctx context.Context, cluster client.Object, object client.Object) ReconcilerHookResult { @@ -104,6 +114,7 @@ func reconcilerHook( serializedCluster, err := json.Marshal(cluster) if err != nil { return newReconcilerErrorResult( + cnpgOperatorKey, fmt.Errorf("while serializing %s %s/%s to JSON: %w", cluster.GetObjectKind().GroupVersionKind().Kind, cluster.GetNamespace(), cluster.GetName(), @@ -115,6 +126,7 @@ func reconcilerHook( serializedObject, err := json.Marshal(object) if err != nil { return newReconcilerErrorResult( + cnpgOperatorKey, fmt.Errorf( "while serializing %s %s/%s to JSON: %w", cluster.GetObjectKind().GroupVersionKind().Kind, @@ -139,7 +151,7 @@ func reconcilerHook( contextLogger.Info( "Skipping reconciler hooks for unknown group", "objectGvk", object.GetObjectKind()) - return newContinueResult() + return newContinueResult(cnpgOperatorKey) } for idx := range plugins { @@ -151,20 +163,20 @@ func reconcilerHook( result, err := executeRequest(ctx, plugin.ReconcilerHooksClient(), request) if err != nil { - return newReconcilerErrorResult(err) + return newReconcilerErrorResult(plugin.Name(), err) } switch result.Behavior { case reconciler.ReconcilerHooksResult_BEHAVIOR_TERMINATE: - return newTerminateResult() + return newTerminateResult(plugin.Name()) case reconciler.ReconcilerHooksResult_BEHAVIOR_REQUEUE: - return newReconcilerRequeueResult(result.GetRequeueAfter()) + return 
newReconcilerRequeueResult(plugin.Name(), result.GetRequeueAfter()) case reconciler.ReconcilerHooksResult_BEHAVIOR_CONTINUE: - return newContinueResult() + return newContinueResult(plugin.Name()) } } - return newContinueResult() + return newContinueResult(cnpgOperatorKey) } diff --git a/internal/cnpi/plugin/client/restore_job.go b/internal/cnpi/plugin/client/restore_job.go new file mode 100644 index 0000000000..2ea986d120 --- /dev/null +++ b/internal/cnpi/plugin/client/restore_job.go @@ -0,0 +1,76 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "encoding/json" + "errors" + "slices" + + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ErrNoPluginSupportsRestoreJobHooksCapability is raised when no plugin supports the restore job hooks capability +var ErrNoPluginSupportsRestoreJobHooksCapability = errors.New("no plugin supports the restore job hooks capability") + +type gvkEnsurer interface { + EnsureGVKIsPresent() + client.Object +} + +func (data *data) Restore( + ctx context.Context, + cluster gvkEnsurer, +) (*restore.RestoreResponse, error) { + r, err := data.innerRestore(ctx, cluster) + return r, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerRestore( + ctx context.Context, + cluster gvkEnsurer, +) (*restore.RestoreResponse, error) { + cluster.EnsureGVKIsPresent() + + for idx := range data.plugins { + plugin := data.plugins[idx] + + if !slices.Contains(plugin.RestoreJobHooksCapabilities(), restore.RestoreJobHooksCapability_KIND_RESTORE) { + continue + } + + clusterDefinition, err := json.Marshal(cluster) + if err != nil { + return nil, err + } + request := restore.RestoreRequest{ + ClusterDefinition: clusterDefinition, + } + res, err := plugin.RestoreJobHooksClient().Restore(ctx, &request) + if err != nil { + return nil, err + } + return res, nil + } + + return nil, ErrNoPluginSupportsRestoreJobHooksCapability +} diff --git a/internal/cnpi/plugin/client/suite_test.go b/internal/cnpi/plugin/client/suite_test.go index 490518410f..bb6f18a801 100644 --- a/internal/cnpi/plugin/client/suite_test.go +++ b/internal/cnpi/plugin/client/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
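For orientation, Restore above returns the first capable plugin's response, or a sentinel error when no plugin advertises the restore-job capability. A caller-side sketch, assuming nativeRestore and applyRestoreResponse as hypothetical helpers and that the plugin-error wrapper keeps the sentinel reachable via errors.Is:

```go
// restoreViaPlugins is a hypothetical caller of the API above.
func restoreViaPlugins(ctx context.Context, c *data, cluster gvkEnsurer) error {
	res, err := c.Restore(ctx, cluster)
	if errors.Is(err, ErrNoPluginSupportsRestoreJobHooksCapability) {
		// No plugin took ownership: fall back to the operator's
		// built-in restore path (hypothetical helper).
		return nativeRestore(ctx, cluster)
	}
	if err != nil {
		return err
	}
	return applyRestoreResponse(res) // hypothetical consumer of the response
}
```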
+ +SPDX-License-Identifier: Apache-2.0 */ package client @@ -23,10 +26,14 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/backup" "github.com/cloudnative-pg/cnpg-i/pkg/identity" "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" "github.com/cloudnative-pg/cnpg-i/pkg/operator" + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/cnpg-i/pkg/reconciler" + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/cnpg-i/pkg/wal" "google.golang.org/grpc" + k8client "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" @@ -103,6 +110,38 @@ type fakeConnection struct { operatorClient *fakeOperatorClient } +func (f *fakeConnection) MetricsClient() metrics.MetricsClient { + panic("implement me") +} + +func (f *fakeConnection) MetricsCapabilities() []metrics.MetricsCapability_RPC_Type { + panic("implement me") +} + +func (f *fakeConnection) GetMetricsDefinitions(context.Context, k8client.Object) (PluginMetricDefinitions, error) { + panic("implement me") +} + +func (f *fakeConnection) CollectMetrics(context.Context, k8client.Object) ([]*metrics.CollectMetric, error) { + panic("implement me") +} + +func (f *fakeConnection) PostgresClient() postgresClient.PostgresClient { + panic("implement me") +} + +func (f *fakeConnection) PostgresCapabilities() []postgresClient.PostgresCapability_RPC_Type { + panic("implement me") +} + +func (f *fakeConnection) RestoreJobHooksClient() restore.RestoreJobHooksClient { + panic("implement me") +} + +func (f *fakeConnection) RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind { + panic("implement me") +} + func (f *fakeConnection) setStatusResponse(status []byte) { f.operatorClient.status = &operator.SetStatusInClusterResponse{ JsonStatus: status, @@ -176,3 +215,7 @@ func (f *fakeConnection) Ping(_ context.Context) error { func (f *fakeConnection) Close() error { panic("not implemented") // TODO: Implement } + +type fakeCluster struct { + k8client.Object +} diff --git a/internal/cnpi/plugin/client/wal.go b/internal/cnpi/plugin/client/wal.go index c4e1bbcede..7e86ebd5be 100644 --- a/internal/cnpi/plugin/client/wal.go +++ b/internal/cnpi/plugin/client/wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
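A recurring shape in this PR: exported entry points (LifecycleHook, ArchiveWAL, RestoreWAL, EnrichConfiguration, Restore) delegate to inner* twins and normalize the error on the way out. A generic sketch of that shape with a stand-in wrapper type and the standard library errors package, since wrapAsPluginErrorIfNeeded itself is defined elsewhere in the package:

```go
// pluginSketchError is a stand-in for the package's real plugin error type.
type pluginSketchError struct{ err error }

func (e pluginSketchError) Error() string { return "plugin: " + e.err.Error() }
func (e pluginSketchError) Unwrap() error { return e.err }

// wrapIfNeeded mirrors the wrap-on-exit idea: nil stays nil, errors that
// are already classified pass through, everything else gets tagged.
func wrapIfNeeded(err error) error {
	if err == nil {
		return nil
	}
	var sketch pluginSketchError
	if errors.As(err, &sketch) {
		return err
	}
	return pluginSketchError{err: err}
}
```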
+ +SPDX-License-Identifier: Apache-2.0 */ package client @@ -32,6 +35,14 @@ func (data *data) ArchiveWAL( ctx context.Context, cluster client.Object, sourceFileName string, +) error { + return wrapAsPluginErrorIfNeeded(data.innerArchiveWAL(ctx, cluster, sourceFileName)) +} + +func (data *data) innerArchiveWAL( + ctx context.Context, + cluster client.Object, + sourceFileName string, ) error { contextLogger := log.FromContext(ctx) @@ -76,14 +87,24 @@ func (data *data) RestoreWAL( cluster client.Object, sourceWALName string, destinationFileName string, -) error { +) (bool, error) { + b, err := data.innerRestoreWAL(ctx, cluster, sourceWALName, destinationFileName) + return b, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerRestoreWAL( + ctx context.Context, + cluster client.Object, + sourceWALName string, + destinationFileName string, +) (bool, error) { var errorCollector error contextLogger := log.FromContext(ctx) serializedCluster, err := json.Marshal(cluster) if err != nil { - return fmt.Errorf("while serializing %s %s/%s to JSON: %w", + return false, fmt.Errorf("while serializing %s %s/%s to JSON: %w", cluster.GetObjectKind().GroupVersionKind().Kind, cluster.GetNamespace(), cluster.GetName(), err, @@ -114,9 +135,9 @@ func (data *data) RestoreWAL( pluginLogger.Trace("WAL restore via plugin failed, trying next one", "err", err) errorCollector = multierr.Append(errorCollector, err) } else { - return nil + return true, nil } } - return errorCollector + return false, errorCollector } diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go index 1a8d46e7a9..f03dcab1d5 100644 --- a/internal/cnpi/plugin/connection/connection.go +++ b/internal/cnpi/plugin/connection/connection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
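RestoreWAL now reports (handled, err): true on the first plugin that restores the file, false with an aggregated error when every capable plugin fails, and false with a nil error when none is capable. A sketch of a hypothetical caller keying its fallback off the boolean:

```go
// fetchWAL is a hypothetical caller of the API above.
func fetchWAL(ctx context.Context, c *data, cluster client.Object, walName, destPath string) error {
	handled, err := c.RestoreWAL(ctx, cluster, walName, destPath)
	if err != nil {
		// every capable plugin failed; err aggregates the failures (multierr)
		return err
	}
	if !handled {
		// no plugin advertises WAL restore: run the configured native
		// restore command instead (hypothetical helper)
		return nativeRestoreWAL(ctx, walName, destPath)
	}
	return nil
}
```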
+ +SPDX-License-Identifier: Apache-2.0 */ package connection @@ -26,14 +29,18 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/backup" "github.com/cloudnative-pg/cnpg-i/pkg/identity" "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" "github.com/cloudnative-pg/cnpg-i/pkg/operator" + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/cnpg-i/pkg/reconciler" + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/cnpg-i/pkg/wal" "google.golang.org/grpc" ) -// defaultTimeout is the timeout applied by default to every GRPC call -const defaultTimeout = 30 * time.Second +// defaultNetworkCallTimeout is the timeout applied by default to every GRPC +// call to a plugin in a different Pod +const defaultNetworkCallTimeout = 30 * time.Second // Protocol represents a way to connect to a plugin type Protocol interface { @@ -57,6 +64,9 @@ type Interface interface { WALClient() wal.WALClient BackupClient() backup.BackupClient ReconcilerHooksClient() reconciler.ReconcilerHooksClient + RestoreJobHooksClient() restore.RestoreJobHooksClient + PostgresClient() postgresClient.PostgresClient + MetricsClient() metrics.MetricsClient PluginCapabilities() []identity.PluginCapability_Service_Type OperatorCapabilities() []operator.OperatorCapability_RPC_Type @@ -64,6 +74,9 @@ type Interface interface { LifecycleCapabilities() []*lifecycle.OperatorLifecycleCapabilities BackupCapabilities() []backup.BackupCapability_RPC_Type ReconcilerCapabilities() []reconciler.ReconcilerHooksCapability_Kind + RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind + PostgresCapabilities() []postgresClient.PostgresCapability_RPC_Type + MetricsCapabilities() []metrics.MetricsCapability_RPC_Type Ping(ctx context.Context) error Close() error @@ -77,15 +90,21 @@ type data struct { walClient wal.WALClient backupClient backup.BackupClient reconcilerHooksClient reconciler.ReconcilerHooksClient - - name string - version string - capabilities []identity.PluginCapability_Service_Type - operatorCapabilities []operator.OperatorCapability_RPC_Type - walCapabilities []wal.WALCapability_RPC_Type - lifecycleCapabilities []*lifecycle.OperatorLifecycleCapabilities - backupCapabilities []backup.BackupCapability_RPC_Type - reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind + restoreJobHooksClient restore.RestoreJobHooksClient + postgresClient postgresClient.PostgresClient + metricsClient metrics.MetricsClient + + name string + version string + capabilities []identity.PluginCapability_Service_Type + operatorCapabilities []operator.OperatorCapability_RPC_Type + walCapabilities []wal.WALCapability_RPC_Type + lifecycleCapabilities []*lifecycle.OperatorLifecycleCapabilities + backupCapabilities []backup.BackupCapability_RPC_Type + reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind + restoreJobHooksCapabilities []restore.RestoreJobHooksCapability_Kind + postgresCapabilities []postgresClient.PostgresCapability_RPC_Type + metricsCapabilities []metrics.MetricsCapability_RPC_Type } func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, error) { @@ -102,16 +121,20 @@ func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, return data{}, fmt.Errorf("while querying plugin identity: %w", err) } - result := data{} - result.connection = connection - result.name = pluginInfoResponse.Name - result.version = pluginInfoResponse.Version - result.identityClient = 
identity.NewIdentityClient(connection) - result.operatorClient = operator.NewOperatorClient(connection) - result.lifecycleClient = lifecycle.NewOperatorLifecycleClient(connection) - result.walClient = wal.NewWALClient(connection) - result.backupClient = backup.NewBackupClient(connection) - result.reconcilerHooksClient = reconciler.NewReconcilerHooksClient(connection) + result := data{ + connection: connection, + name: pluginInfoResponse.Name, + version: pluginInfoResponse.Version, + identityClient: identity.NewIdentityClient(connection), + operatorClient: operator.NewOperatorClient(connection), + lifecycleClient: lifecycle.NewOperatorLifecycleClient(connection), + walClient: wal.NewWALClient(connection), + backupClient: backup.NewBackupClient(connection), + reconcilerHooksClient: reconciler.NewReconcilerHooksClient(connection), + restoreJobHooksClient: restore.NewRestoreJobHooksClient(connection), + postgresClient: postgresClient.NewPostgresClient(connection), + metricsClient: metrics.NewMetricsClient(connection), + } return result, err } @@ -232,16 +255,81 @@ func (pluginData *data) loadBackupCapabilities(ctx context.Context) error { return nil } +func (pluginData *data) loadRestoreJobHooksCapabilities(ctx context.Context) error { + var restoreJobHooksCapabilitiesResponse *restore.RestoreJobHooksCapabilitiesResult + var err error + + if restoreJobHooksCapabilitiesResponse, err = pluginData.restoreJobHooksClient.GetCapabilities( + ctx, + &restore.RestoreJobHooksCapabilitiesRequest{}, + ); err != nil { + return fmt.Errorf("while querying plugin restore job hooks capabilities: %w", err) + } + + pluginData.restoreJobHooksCapabilities = make( + []restore.RestoreJobHooksCapability_Kind, + len(restoreJobHooksCapabilitiesResponse.Capabilities)) + for i := range pluginData.restoreJobHooksCapabilities { + pluginData.restoreJobHooksCapabilities[i] = restoreJobHooksCapabilitiesResponse.Capabilities[i].Kind + } + + return nil +} + +func (pluginData *data) loadPostgresCapabilities(ctx context.Context) error { + var postgresCapabilitiesResponse *postgresClient.PostgresCapabilitiesResult + var err error + + if postgresCapabilitiesResponse, err = pluginData.postgresClient.GetCapabilities( + ctx, + &postgresClient.PostgresCapabilitiesRequest{}, + ); err != nil { + return fmt.Errorf("while querying plugin postgres capabilities: %w", err) + } + + pluginData.postgresCapabilities = make( + []postgresClient.PostgresCapability_RPC_Type, + len(postgresCapabilitiesResponse.Capabilities)) + for i := range pluginData.postgresCapabilities { + pluginData.postgresCapabilities[i] = postgresCapabilitiesResponse.Capabilities[i].GetRpc().Type + } + + return nil +} + +func (pluginData *data) loadMetricsCapabilities(ctx context.Context) error { + var metricsCapabilitiesResponse *metrics.MetricsCapabilitiesResult + var err error + + if metricsCapabilitiesResponse, err = pluginData.metricsClient.GetCapabilities( + ctx, + &metrics.MetricsCapabilitiesRequest{}, + ); err != nil { + return fmt.Errorf("while querying plugin metrics capabilities: %w", err) + } + + pluginData.metricsCapabilities = make( + []metrics.MetricsCapability_RPC_Type, + len(metricsCapabilitiesResponse.Capabilities)) + for i := range pluginData.metricsCapabilities { + pluginData.metricsCapabilities[i] = metricsCapabilitiesResponse.Capabilities[i].GetRpc().Type + } + + return nil +} + // Metadata extracts the plugin metadata reading from // the internal metadata func (pluginData *data) Metadata() Metadata { result := Metadata{ - Name: pluginData.name, - Version:
pluginData.version, - Capabilities: make([]string, len(pluginData.capabilities)), - OperatorCapabilities: make([]string, len(pluginData.operatorCapabilities)), - WALCapabilities: make([]string, len(pluginData.walCapabilities)), - BackupCapabilities: make([]string, len(pluginData.backupCapabilities)), + Name: pluginData.name, + Version: pluginData.version, + Capabilities: make([]string, len(pluginData.capabilities)), + OperatorCapabilities: make([]string, len(pluginData.operatorCapabilities)), + WALCapabilities: make([]string, len(pluginData.walCapabilities)), + BackupCapabilities: make([]string, len(pluginData.backupCapabilities)), + RestoreJobHookCapabilities: make([]string, len(pluginData.restoreJobHooksCapabilities)), + PostgresCapabilities: make([]string, len(pluginData.postgresCapabilities)), } for i := range pluginData.capabilities { @@ -260,6 +348,10 @@ func (pluginData *data) Metadata() Metadata { result.BackupCapabilities[i] = pluginData.backupCapabilities[i].String() } + for i := range pluginData.restoreJobHooksCapabilities { + result.RestoreJobHookCapabilities[i] = pluginData.restoreJobHooksCapabilities[i].String() + } + return result } @@ -288,10 +380,22 @@ func (pluginData *data) BackupClient() backup.BackupClient { return pluginData.backupClient } +func (pluginData *data) RestoreJobHooksClient() restore.RestoreJobHooksClient { + return pluginData.restoreJobHooksClient +} + func (pluginData *data) ReconcilerHooksClient() reconciler.ReconcilerHooksClient { return pluginData.reconcilerHooksClient } +func (pluginData *data) PostgresClient() postgresClient.PostgresClient { + return pluginData.postgresClient +} + +func (pluginData *data) MetricsClient() metrics.MetricsClient { + return pluginData.metricsClient +} + func (pluginData *data) PluginCapabilities() []identity.PluginCapability_Service_Type { return pluginData.capabilities } @@ -316,6 +420,18 @@ func (pluginData *data) ReconcilerCapabilities() []reconciler.ReconcilerHooksCap return pluginData.reconcilerCapabilities } +func (pluginData *data) RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind { + return pluginData.restoreJobHooksCapabilities +} + +func (pluginData *data) PostgresCapabilities() []postgresClient.PostgresCapability_RPC_Type { + return pluginData.postgresCapabilities +} + +func (pluginData *data) MetricsCapabilities() []metrics.MetricsCapability_RPC_Type { + return pluginData.metricsCapabilities +} + func (pluginData *data) Ping(ctx context.Context) error { _, err := pluginData.identityClient.Probe(ctx, &identity.ProbeRequest{}) return err @@ -374,5 +490,29 @@ func LoadPlugin(ctx context.Context, handler Handler) (Interface, error) { } } + // If the plugin implements the restore job hooks, load its + // capabilities + if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_RESTORE_JOB) { + if err = result.loadRestoreJobHooksCapabilities(ctx); err != nil { + return nil, err + } + } + + // If the plugin implements the postgres service, load its + // capabilities + if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_POSTGRES) { + if err = result.loadPostgresCapabilities(ctx); err != nil { + return nil, err + } + } + + // If the plugin implements the metrics service, load its + // capabilities + if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_METRICS) { + if err = result.loadMetricsCapabilities(ctx); err != nil { + return nil, err + } + } + return &result, nil } diff --git a/internal/cnpi/plugin/connection/doc.go 
b/internal/cnpi/plugin/connection/doc.go index 01cef3f037..78f836279e 100644 --- a/internal/cnpi/plugin/connection/doc.go +++ b/internal/cnpi/plugin/connection/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package connection represents a connected CNPG-i plugin diff --git a/internal/cnpi/plugin/connection/metadata.go b/internal/cnpi/plugin/connection/metadata.go index 21f28652c3..18643faebc 100644 --- a/internal/cnpi/plugin/connection/metadata.go +++ b/internal/cnpi/plugin/connection/metadata.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package connection @@ -19,10 +22,12 @@ package connection // Metadata expose the metadata as discovered // from a plugin type Metadata struct { - Name string - Version string - Capabilities []string - OperatorCapabilities []string - WALCapabilities []string - BackupCapabilities []string + Name string + Version string + Capabilities []string + OperatorCapabilities []string + WALCapabilities []string + BackupCapabilities []string + RestoreJobHookCapabilities []string + PostgresCapabilities []string } diff --git a/internal/cnpi/plugin/connection/remote.go b/internal/cnpi/plugin/connection/remote.go index 73285c4182..15d0fa93a9 100644 --- a/internal/cnpi/plugin/connection/remote.go +++ b/internal/cnpi/plugin/connection/remote.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package connection @@ -38,7 +41,7 @@ func (p *ProtocolTCP) Dial(_ context.Context) (Handler, error) { p.Address, grpc.WithTransportCredentials(credentials.NewTLS(p.TLSConfig)), grpc.WithUnaryInterceptor( - timeout.UnaryClientInterceptor(defaultTimeout), + timeout.UnaryClientInterceptor(defaultNetworkCallTimeout), ), ) } diff --git a/internal/cnpi/plugin/connection/unix.go b/internal/cnpi/plugin/connection/unix.go index 93495ba4eb..fe39771fde 100644 --- a/internal/cnpi/plugin/connection/unix.go +++ b/internal/cnpi/plugin/connection/unix.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package connection represents a connected CNPG-i plugin @@ -22,7 +25,6 @@ import ( "fmt" "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/timeout" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" ) @@ -40,9 +42,5 @@ func (p ProtocolUnix) Dial(ctx context.Context) (Handler, error) { return grpc.NewClient( dialPath, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithUnaryInterceptor( - timeout.UnaryClientInterceptor(defaultTimeout), - ), - ) + grpc.WithTransportCredentials(insecure.NewCredentials())) } diff --git a/internal/cnpi/plugin/doc.go b/internal/cnpi/plugin/doc.go index c642a90b43..5b59b41c15 100644 --- a/internal/cnpi/plugin/doc.go +++ b/internal/cnpi/plugin/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package plugin contains the logics that acts as bridge between cnpg-i and the operator diff --git a/internal/cnpi/plugin/mapping.go b/internal/cnpi/plugin/mapping.go index 7fc5b67613..a408a3024a 100644 --- a/internal/cnpi/plugin/mapping.go +++ b/internal/cnpi/plugin/mapping.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
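The unix-socket dialer above deliberately drops the blanket 30-second interceptor (kept, under its new name, for TCP): plugin calls over a local socket, such as backups, can legitimately outlive any fixed budget. Callers that still want a bound can set one per RPC; a sketch using the identity probe already present in this package:

```go
// probeWithDeadline shows a caller-chosen, per-RPC budget replacing the
// removed global interceptor.
func (pluginData *data) probeWithDeadline(ctx context.Context) error {
	probeCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	_, err := pluginData.identityClient.Probe(probeCtx, &identity.ProbeRequest{})
	return err
}
```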
+ +SPDX-License-Identifier: Apache-2.0 */ package plugin @@ -22,19 +25,20 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" ) -// The OperationVerb corresponds to the Kubernetes API method +// The OperationVerb corresponds to the CNPG-I lifecycle operation verb type OperationVerb string -// A Kubernetes operation verb +// A lifecycle operation verb const ( - OperationVerbPatch OperationVerb = "PATCH" - OperationVerbUpdate OperationVerb = "UPDATE" - OperationVerbCreate OperationVerb = "CREATE" - OperationVerbDelete OperationVerb = "DELETE" + OperationVerbPatch OperationVerb = "PATCH" + OperationVerbUpdate OperationVerb = "UPDATE" + OperationVerbCreate OperationVerb = "CREATE" + OperationVerbDelete OperationVerb = "DELETE" + OperationVerbEvaluate OperationVerb = "EVALUATE" ) // ToOperationType_Type converts an OperationVerb into a lifecycle.OperationType_Type -// nolint: revive,stylecheck +// nolint: revive,staticcheck func (o OperationVerb) ToOperationType_Type() (lifecycle.OperatorOperationType_Type, error) { switch o { case OperationVerbPatch: @@ -45,6 +49,8 @@ func (o OperationVerb) ToOperationType_Type() (lifecycle.OperatorOperationType_T return lifecycle.OperatorOperationType_TYPE_CREATE, nil case OperationVerbUpdate: return lifecycle.OperatorOperationType_TYPE_UPDATE, nil + case OperationVerbEvaluate: + return lifecycle.OperatorOperationType_TYPE_EVALUATE, nil } return lifecycle.OperatorOperationType_Type(0), fmt.Errorf("unknown operation type: '%s'", o) diff --git a/internal/cnpi/plugin/operatorclient/client.go b/internal/cnpi/plugin/operatorclient/client.go index 7a4aa84456..38dec3a931 100644 --- a/internal/cnpi/plugin/operatorclient/client.go +++ b/internal/cnpi/plugin/operatorclient/client.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
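With EVALUATE added, the verb mapping covers one more lifecycle case; conversion stays explicit, and unknown verbs keep failing loudly. A minimal usage sketch:

```go
// evaluateOperationType is a minimal use of the mapping above.
func evaluateOperationType() (lifecycle.OperatorOperationType_Type, error) {
	opType, err := OperationVerbEvaluate.ToOperationType_Type()
	if err != nil {
		// an unmapped verb is an explicit error, never a silent zero value
		return 0, err
	}
	return opType, nil // lifecycle.OperatorOperationType_TYPE_EVALUATE
}
```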
+ +SPDX-License-Identifier: Apache-2.0 */ package operatorclient @@ -26,7 +29,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) type extendedClient struct { @@ -47,14 +50,14 @@ func (e *extendedClient) invokePlugin( ) (client.Object, error) { contextLogger := log.FromContext(ctx).WithName("invokePlugin") - cluster, ok := ctx.Value(utils.ContextKeyCluster).(client.Object) + cluster, ok := ctx.Value(contextutils.ContextKeyCluster).(client.Object) if !ok || cluster == nil { contextLogger.Trace("skipping invokePlugin, cannot find the cluster inside the context") return obj, nil } - pluginClient, ok := ctx.Value(utils.PluginClientKey).(cnpgiClient.Client) - if !ok || pluginClient == nil { + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) + if pluginClient == nil { contextLogger.Trace("skipping invokePlugin, cannot find the plugin client inside the context") return obj, nil } diff --git a/internal/cnpi/plugin/operatorclient/client_test.go b/internal/cnpi/plugin/operatorclient/client_test.go index aaaa975948..f8b52a1803 100644 --- a/internal/cnpi/plugin/operatorclient/client_test.go +++ b/internal/cnpi/plugin/operatorclient/client_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package operatorclient @@ -26,7 +29,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" pluginclient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -75,8 +78,8 @@ var _ = Describe("extendedClient", func() { It("invokePlugin", func(ctx SpecContext) { fakeCrd := &fakeClusterCRD{} - newCtx := context.WithValue(ctx, utils.ContextKeyCluster, fakeCrd) - newCtx = context.WithValue(newCtx, utils.PluginClientKey, pluginClient) + newCtx := context.WithValue(ctx, contextutils.ContextKeyCluster, fakeCrd) + newCtx = context.WithValue(newCtx, contextutils.PluginClientKey, pluginClient) By("ensuring it works the first invocation", func() { obj, err := c.invokePlugin(newCtx, plugin.OperationVerbCreate, &corev1.Pod{}) diff --git a/internal/cnpi/plugin/operatorclient/doc.go b/internal/cnpi/plugin/operatorclient/doc.go index 89ddcf2ee0..4d339fc358 100644 --- a/internal/cnpi/plugin/operatorclient/doc.go +++ b/internal/cnpi/plugin/operatorclient/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
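invokePlugin above fires only when both the cluster and the plugin client are present in the context; with either missing it is a deliberate no-op. Reconcilers are therefore expected to prime the context first, exactly as the updated test does. A sketch of that priming:

```go
// primeContext mirrors the pattern used in client_test.go above.
func primeContext(ctx context.Context, cluster client.Object, pluginClient cnpgiClient.Client) context.Context {
	ctx = context.WithValue(ctx, contextutils.ContextKeyCluster, cluster)
	ctx = context.WithValue(ctx, contextutils.PluginClientKey, pluginClient)
	// calls made with this context can now be decorated by plugins
	return ctx
}
```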
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package operatorclient contains an extended kubernetes client that supports plugin API calls diff --git a/internal/cnpi/plugin/operatorclient/suite_test.go b/internal/cnpi/plugin/operatorclient/suite_test.go index 52a34e0df9..a48d27f210 100644 --- a/internal/cnpi/plugin/operatorclient/suite_test.go +++ b/internal/cnpi/plugin/operatorclient/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package operatorclient diff --git a/internal/cnpi/plugin/repository/connection.go b/internal/cnpi/plugin/repository/connection.go index 3a563ac6a5..c7879902c1 100644 --- a/internal/cnpi/plugin/repository/connection.go +++ b/internal/cnpi/plugin/repository/connection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package repository @@ -26,6 +29,10 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" ) +// maxConnectionAttempts is the maximum number of connection attempts to a +// plugin.
maxConnectionAttempts should be higher or equal to maxPoolSize +const maxConnectionAttempts = 5 + type releasingConnection struct { connection.Interface closer func() error @@ -51,7 +58,7 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter var resource *puddle.Resource[connection.Interface] var err error - for i := 0; i < maxPoolSize; i++ { + for i := 0; i < maxConnectionAttempts; i++ { contextLogger.Trace("try getting connection") resource, err = pool.Acquire(ctx) if err != nil { @@ -60,7 +67,10 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter err = resource.Value().Ping(ctx) if err != nil { - contextLogger.Debug("Detected plugin connection error, closing the connection and trying again") + contextLogger.Warning( + "Detected stale/broken plugin connection, closing and trying again", + "pluginName", name, + "err", err) resource.Destroy() } else { break @@ -71,9 +81,17 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter return nil, fmt.Errorf("while getting plugin connection: %w", err) } + contextLogger.Trace( + "Acquired logical plugin connection", + "name", name, + ) return &releasingConnection{ Interface: resource.Value(), closer: func() error { + contextLogger.Trace( + "Released logical plugin connection", + "name", name, + ) // When the client has done its job with a plugin connection, it // will be returned to the pool resource.Release() diff --git a/internal/cnpi/plugin/repository/doc.go b/internal/cnpi/plugin/repository/doc.go index 232d249998..03b3cbeb2a 100644 --- a/internal/cnpi/plugin/repository/doc.go +++ b/internal/cnpi/plugin/repository/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package repository contains the plugin discovery and diff --git a/internal/cnpi/plugin/repository/errors.go b/internal/cnpi/plugin/repository/errors.go index 4dc180bd9c..bfe6a3e504 100644 --- a/internal/cnpi/plugin/repository/errors.go +++ b/internal/cnpi/plugin/repository/errors.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
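GetConnection above hands back a pooled, ping-validated handle wrapped in releasingConnection, so the caller contract is acquire, use, Close, where Close returns the resource to the pool rather than severing it (assuming the wrapper's Close delegates to the closer shown above). A usage sketch with a hypothetical plugin name:

```go
// inspectPlugin is a usage sketch; the plugin name is hypothetical.
func inspectPlugin(ctx context.Context, repo Interface) error {
	conn, err := repo.GetConnection(ctx, "example-plugin")
	if err != nil {
		return err
	}
	defer func() {
		_ = conn.Close() // releases the pooled resource, does not sever it
	}()
	_ = conn.BackupCapabilities()
	return nil
}
```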
+ +SPDX-License-Identifier: Apache-2.0 */ package repository diff --git a/internal/cnpi/plugin/repository/setup.go b/internal/cnpi/plugin/repository/setup.go index 76da6773e8..c96a824e7e 100644 --- a/internal/cnpi/plugin/repository/setup.go +++ b/internal/cnpi/plugin/repository/setup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package repository @@ -25,6 +28,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/puddle/v2" + "go.uber.org/multierr" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" ) @@ -33,16 +37,18 @@ import ( type Interface interface { // ForgetPlugin closes every connection to the plugin with the passed name // and forgets its discovery info. - // If the plug in was not available in the repository, this is a no-op + // This operation is synchronous and blocks until every connection is closed. + // If the plugin was not available in the repository, this is a no-op. ForgetPlugin(name string) // RegisterRemotePlugin registers a plugin available on a remote - // TCP entrypoint + // TCP entrypoint. RegisterRemotePlugin(name string, address string, tlsConfig *tls.Config) error // RegisterUnixSocketPluginsInPath scans the passed directory - // for plugins that are deployed with unix sockets - RegisterUnixSocketPluginsInPath(pluginsPath string) error + // for plugins that are deployed with unix sockets. + // Return the list of loaded plugin names + RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error) // GetConnection gets a connection to the plugin with specified name GetConnection(ctx context.Context, name string) (connection.Interface, error) @@ -58,61 +64,84 @@ type data struct { pluginConnectionPool map[string]*puddle.Pool[connection.Interface] } +// pluginSetupOptions are the options to be used when setting up +// a plugin connection +type pluginSetupOptions struct { + // forceRegistration forces the creation of a new plugin connection + // even if one already exists. The existing connection will be closed. + forceRegistration bool +} + +// maxPoolSize is the maximum number of connections in a plugin's connection +// pool const maxPoolSize = 5 -func (r *data) setPluginProtocol(name string, protocol connection.Protocol) error { - r.mux.Lock() - defer r.mux.Unlock() +func pluginConnectionConstructor(name string, protocol connection.Protocol) puddle.Constructor[connection.Interface] { + return func(ctx context.Context) (connection.Interface, error) { + logger := log. + FromContext(ctx). + WithName("setPluginProtocol"). 
+ WithValues("pluginName", name) + ctx = log.IntoContext(ctx, logger) - if r.pluginConnectionPool == nil { - r.pluginConnectionPool = make(map[string]*puddle.Pool[connection.Interface]) - } + logger.Trace("Connecting to plugin") + var ( + result connection.Interface + handler connection.Handler + err error + ) + + if handler, err = protocol.Dial(ctx); err != nil { + logger.Error(err, "Error while connecting to plugin (physical)") + return nil, err + } - _, ok := r.pluginConnectionPool[name] - if ok { - return &ErrPluginAlreadyRegistered{ - Name: name, + if result, err = connection.LoadPlugin(ctx, handler); err != nil { + logger.Error(err, "Error while connecting to plugin (logical)") + _ = handler.Close() + return nil, err } + + return result, err } +} - constructor := func(ctx context.Context) (res connection.Interface, err error) { - var handler connection.Handler +func pluginConnectionDestructor(res connection.Interface) { + logger := log.FromContext(context.Background()). + WithName("pluginConnectionDestructor"). + WithValues("pluginName", res.Name()) - defer func() { - if err != nil && handler != nil { - _ = handler.Close() - } - }() + logger.Trace("Released physical plugin connection") - constructorLogger := log. - FromContext(ctx). - WithName("setPluginProtocol"). - WithValues("pluginName", name) - ctx = log.IntoContext(ctx, constructorLogger) + err := res.Close() + if err != nil { + logger.Warning("Error while closing plugin connection", "err", err) + } +} - if handler, err = protocol.Dial(ctx); err != nil { - constructorLogger.Error(err, "Got error while connecting to plugin") - return nil, err - } +func (r *data) setPluginProtocol(name string, protocol connection.Protocol, opts pluginSetupOptions) error { + r.mux.Lock() + defer r.mux.Unlock() - return connection.LoadPlugin(ctx, handler) + if r.pluginConnectionPool == nil { + r.pluginConnectionPool = make(map[string]*puddle.Pool[connection.Interface]) } - destructor := func(res connection.Interface) { - err := res.Close() - if err != nil { - destructorLogger := log.FromContext(context.Background()). - WithName("setPluginProtocol"). - WithValues("pluginName", res.Name()) - destructorLogger.Warning("Error while closing plugin connection", "err", err) + if oldPool, alreadyRegistered := r.pluginConnectionPool[name]; alreadyRegistered { + if opts.forceRegistration { + oldPool.Close() + } else { + return &ErrPluginAlreadyRegistered{ + Name: name, + } } } var err error r.pluginConnectionPool[name], err = puddle.NewPool( &puddle.Config[connection.Interface]{ - Constructor: constructor, - Destructor: destructor, + Constructor: pluginConnectionConstructor(name, protocol), + Destructor: pluginConnectionDestructor, MaxSize: maxPoolSize, }, ) @@ -131,48 +160,64 @@ func (r *data) ForgetPlugin(name string) { return } - // TODO(leonardoce): should we really wait for all the plugin connections - // to be closed? pool.Close() + delete(r.pluginConnectionPool, name) } // registerUnixSocketPlugin registers a plugin available at the passed // unix socket path func (r *data) registerUnixSocketPlugin(name, path string) error { - return r.setPluginProtocol(name, connection.ProtocolUnix(path)) + return r.setPluginProtocol(name, connection.ProtocolUnix(path), pluginSetupOptions{ + // Forcing the registration of a Unix socket plugin has no meaning + // because they can be installed and started only when the Pod is created. 
+ forceRegistration: false, + }) } func (r *data) RegisterRemotePlugin(name string, address string, tlsConfig *tls.Config) error { - return r.setPluginProtocol(name, &connection.ProtocolTCP{ + protocol := &connection.ProtocolTCP{ TLSConfig: tlsConfig, Address: address, - }) + } + + // The RegisterRemotePlugin function is called when the plugin is registered for + // the first time and when the certificates of an existing plugin get refreshed. + // In the second case, the plugin loading will be forced and all existing + // connections will be dropped and recreated. + opts := pluginSetupOptions{ + forceRegistration: true, + } + return r.setPluginProtocol(name, protocol, opts) } -func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) error { +func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error) { entries, err := os.ReadDir(pluginsPath) if err != nil { // There's no need to complain if the plugin folder doesn't exist if os.IsNotExist(err) { - return nil + return nil, nil } // Otherwise, this means we can't read that folder and // is a real problem - return err + return nil, err } + pluginsNames := make([]string, 0, len(entries)) + var errors error for _, entry := range entries { name := entry.Name() if err := r.registerUnixSocketPlugin( name, path.Join(pluginsPath, name), ); err != nil { - return err + errors = multierr.Append(errors, err) + } else { + pluginsNames = append(pluginsNames, name) } } - return nil + return pluginsNames, errors } // New creates a new plugin repository diff --git a/internal/cnpi/plugin/repository/setup_test.go b/internal/cnpi/plugin/repository/setup_test.go new file mode 100644 index 0000000000..f680e2b48e --- /dev/null +++ b/internal/cnpi/plugin/repository/setup_test.go @@ -0,0 +1,81 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package repository + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Set Plugin Protocol", func() { + var repository *data + + BeforeEach(func() { + repository = &data{} + }) + + It("creates connection pool for new plugin", func() { + err := repository.setPluginProtocol("plugin1", newUnitTestProtocol("test"), pluginSetupOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(repository.pluginConnectionPool).To(HaveKey("plugin1")) + }) + + It("fails when adding same plugin name without forceRegistration", func() { + err := repository.setPluginProtocol("plugin1", newUnitTestProtocol("/tmp/socket"), pluginSetupOptions{}) + Expect(err).NotTo(HaveOccurred()) + + err = repository.setPluginProtocol("plugin1", newUnitTestProtocol("/tmp/socket2"), pluginSetupOptions{}) + Expect(err).To(BeEquivalentTo(&ErrPluginAlreadyRegistered{Name: "plugin1"})) + }) + + It("overwrites existing plugin when forceRegistration is true", func() { + first := newUnitTestProtocol("/tmp/socket") + err := repository.setPluginProtocol("plugin1", first, pluginSetupOptions{}) + Expect(err).NotTo(HaveOccurred()) + pool1 := repository.pluginConnectionPool["plugin1"] + + ctx1, cancel := context.WithCancel(context.Background()) + conn1, err := pool1.Acquire(ctx1) + Expect(err).NotTo(HaveOccurred()) + Expect(conn1).NotTo(BeNil()) + cancel() + conn1.Release() + + second := newUnitTestProtocol("/tmp/socket2") + err = repository.setPluginProtocol("plugin1", second, pluginSetupOptions{forceRegistration: true}) + Expect(err).NotTo(HaveOccurred()) + pool2 := repository.pluginConnectionPool["plugin1"] + + ctx2, cancel := context.WithCancel(context.Background()) + conn2, err := pool2.Acquire(ctx2) + Expect(err).NotTo(HaveOccurred()) + Expect(conn2).NotTo(BeNil()) + cancel() + conn2.Release() + + Expect(pool1).NotTo(Equal(pool2)) + Expect(first.mockHandlers).To(HaveLen(1)) + Expect(first.mockHandlers[0].closed).To(BeTrue()) + Expect(second.mockHandlers).To(HaveLen(1)) + Expect(second.mockHandlers[0].closed).To(BeFalse()) + }) +}) diff --git a/internal/cnpi/plugin/repository/suite_test.go b/internal/cnpi/plugin/repository/suite_test.go new file mode 100644 index 0000000000..2672d79d7d --- /dev/null +++ b/internal/cnpi/plugin/repository/suite_test.go @@ -0,0 +1,138 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package repository + +import ( + "context" + "net" + "testing" + + "github.com/cloudnative-pg/cnpg-i/pkg/identity" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/test/bufconn" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestRepository(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Repository Suite") +} + +type identityImplementation struct { + identity.UnimplementedIdentityServer +} + +// GetPluginMetadata implements Identity +func (i identityImplementation) GetPluginMetadata( + _ context.Context, + _ *identity.GetPluginMetadataRequest, +) (*identity.GetPluginMetadataResponse, error) { + return &identity.GetPluginMetadataResponse{ + Name: "testing-service", + Version: "0.0.1", + DisplayName: "testing-service", + ProjectUrl: "https://github.com/cloudnative-pg/cloudnative-pg", + RepositoryUrl: "https://github.com/cloudnative-pg/cloudnative-pg", + License: "APACHE 2.0", + Maturity: "alpha", + }, nil +} + +// GetPluginCapabilities implements identity +func (i identityImplementation) GetPluginCapabilities( + _ context.Context, + _ *identity.GetPluginCapabilitiesRequest, +) (*identity.GetPluginCapabilitiesResponse, error) { + return &identity.GetPluginCapabilitiesResponse{ + Capabilities: []*identity.PluginCapability{}, + }, nil +} + +// Probe implements Identity +func (i identityImplementation) Probe( + _ context.Context, + _ *identity.ProbeRequest, +) (*identity.ProbeResponse, error) { + return &identity.ProbeResponse{ + Ready: true, + }, nil +} + +type unitTestProtocol struct { + name string + mockHandlers []*mockHandler + server *grpc.Server +} + +type mockHandler struct { + *grpc.ClientConn + closed bool +} + +func newUnitTestProtocol(name string) *unitTestProtocol { + return &unitTestProtocol{name: name} +} + +func (h *mockHandler) Close() error { + _ = h.ClientConn.Close() + h.closed = true + return nil +} + +func (p *unitTestProtocol) Dial(ctx context.Context) (connection.Handler, error) { + listener := bufconn.Listen(1024 * 1024) + + if len(p.mockHandlers) == 0 { + p.server = grpc.NewServer() + + identity.RegisterIdentityServer(p.server, &identityImplementation{}) + + go func() { + <-ctx.Done() + p.server.Stop() + }() + + go func() { + _ = p.server.Serve(listener) + }() + } + + dialer := func(_ context.Context, _ string) (net.Conn, error) { + return listener.Dial() + } + + conn, err := grpc.NewClient("passthrough://bufnet", + grpc.WithContextDialer(dialer), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + return nil, err + } + mh := &mockHandler{ + ClientConn: conn, + } + p.mockHandlers = append(p.mockHandlers, mh) + return mh, nil +} diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go index b2b2a7e6d6..c88905f847 100644 --- a/internal/configuration/configuration.go +++ b/internal/configuration/configuration.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package configuration contains the configuration of the operator, reading @@ -21,6 +24,7 @@ package configuration import ( "path" "strings" + "time" "github.com/cloudnative-pg/machinery/pkg/log" @@ -39,8 +43,33 @@ const ( // ExpiringCheckThreshold is the default threshold to consider a certificate as expiring ExpiringCheckThreshold = 7 + + // DefaultKubernetesClusterDomain is the default value used as + // Kubernetes cluster domain. + DefaultKubernetesClusterDomain = "cluster.local" ) +// DefaultDrainTaints is the default list of taints the operator will watch and treat +// as unschedulable +var DefaultDrainTaints = []string{ + // Kubernetes well-known unschedulable taint + // See: https://kubernetes.io/docs/reference/labels-annotations-taints/#node-kubernetes-io-unschedulable + "node.kubernetes.io/unschedulable", + + // Used by the Kubernetes Cluster Autoscaler + // nolint: lll + // See: https://github.com/kubernetes/autoscaler/blob/aa1d413ea3bf319b56c7b2e65ade1a028e149439/cluster-autoscaler/cloudprovider/oci/nodepools/consts/annotations.go#L27 + "ToBeDeletedByClusterAutoscaler", + + // Used by Karpenter termination controller + // See: https://karpenter.sh/docs/concepts/disruption/#termination-controller + "karpenter.sh/disrupted", + + // Used by Karpenter disruption controller + // See: https://karpenter.sh/v0.32/concepts/disruption/#disruption-controller + "karpenter.sh/disruption", +} + // DefaultPluginSocketDir is the default directory where the plugin sockets are located. const DefaultPluginSocketDir = "/plugins" @@ -95,9 +124,6 @@ type Data struct { // replacing the executable in a pod without restarting EnableInstanceManagerInplaceUpdates bool `json:"enableInstanceManagerInplaceUpdates" env:"ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES"` //nolint - // EnableAzurePVCUpdates enables the live update of PVC in Azure environment - EnableAzurePVCUpdates bool `json:"enableAzurePVCUpdates" env:"ENABLE_AZURE_PVC_UPDATES"` - // This is the lifetime of the generated certificates CertificateDuration int `json:"certificateDuration" env:"CERTIFICATE_DURATION"` @@ -108,9 +134,37 @@ type Data struct { // the -any service. Defaults to false. CreateAnyService bool `json:"createAnyService" env:"CREATE_ANY_SERVICE"` + // The duration (in seconds) to wait between the roll-outs of different + // clusters during an operator upgrade. This setting controls the + // timing of upgrades across clusters, spreading them out to reduce + // system impact. The default value is 0, which means no delay between + // PostgreSQL cluster upgrades. + ClustersRolloutDelay int `json:"clustersRolloutDelay" env:"CLUSTERS_ROLLOUT_DELAY"` + + // The duration (in seconds) to wait between roll-outs of individual + // PostgreSQL instances within the same cluster during an operator + // upgrade. The default value is 0, meaning no delay between upgrades + // of instances in the same PostgreSQL cluster. + InstancesRolloutDelay int `json:"instancesRolloutDelay" env:"INSTANCES_ROLLOUT_DELAY"` + // IncludePlugins is a comma-separated list of plugins to always be + // included in the Cluster reconciliation IncludePlugins string `json:"includePlugins" env:"INCLUDE_PLUGINS"` + + // The StandbyTCPUserTimeout configuration parameter allows you to + // specify a custom TCP user timeout for the standby PostgreSQL + // server's connection to the primary server.
This timeout is + added as a tcp_user_timeout option to the primary_conninfo + string, which is used by the standby server to connect to the + primary server in CloudNativePG. + StandbyTCPUserTimeout int `json:"standbyTcpUserTimeout" env:"STANDBY_TCP_USER_TIMEOUT"` + + // KubernetesClusterDomain defines the domain suffix for service FQDNs + // within the Kubernetes cluster. If left unset, it defaults to `cluster.local`. + KubernetesClusterDomain string `json:"kubernetesClusterDomain" env:"KUBERNETES_CLUSTER_DOMAIN"` + + // DrainTaints is a list of taints the operator will watch and treat as unschedulable + DrainTaints []string `json:"drainTaints" env:"DRAIN_TAINTS"` } // Current is the configuration used by the operator @@ -119,13 +173,16 @@ var Current = NewConfiguration() // newDefaultConfig creates a configuration holding the defaults func newDefaultConfig() *Data { return &Data{ - OperatorPullSecretName: DefaultOperatorPullSecretName, - OperatorImageName: versions.DefaultOperatorImageName, - PostgresImageName: versions.DefaultImageName, - PluginSocketDir: DefaultPluginSocketDir, - CreateAnyService: false, - CertificateDuration: CertificateDuration, - ExpiringCheckThreshold: ExpiringCheckThreshold, + OperatorPullSecretName: DefaultOperatorPullSecretName, + OperatorImageName: versions.DefaultOperatorImageName, + PostgresImageName: versions.DefaultImageName, + PluginSocketDir: DefaultPluginSocketDir, + CreateAnyService: false, + CertificateDuration: CertificateDuration, + ExpiringCheckThreshold: ExpiringCheckThreshold, + StandbyTCPUserTimeout: 0, + KubernetesClusterDomain: DefaultKubernetesClusterDomain, + DrainTaints: DefaultDrainTaints, } } @@ -139,7 +196,7 @@ func NewConfiguration() *Data { // ReadConfigMap reads the configuration from the environment and the passed-in data map func (config *Data) ReadConfigMap(data map[string]string) { - configparser.ReadConfigMap(config, newDefaultConfig(), data, configparser.OsEnvironment{}) + configparser.ReadConfigMap(config, newDefaultConfig(), data) } // IsAnnotationInherited checks if an annotation with a certain name should @@ -154,6 +211,17 @@ func (config *Data) IsLabelInherited(name string) bool { return evaluateGlobPatterns(config.InheritedLabels, name) } +// GetClustersRolloutDelay gets the delay between roll-outs of different clusters +func (config *Data) GetClustersRolloutDelay() time.Duration { + return time.Duration(config.ClustersRolloutDelay) * time.Second +} + +// GetInstancesRolloutDelay gets the delay between roll-outs of pods belonging +// to the same cluster +func (config *Data) GetInstancesRolloutDelay() time.Duration { + return time.Duration(config.InstancesRolloutDelay) * time.Second +} + // WatchedNamespaces gets the list of additional watched namespaces. // The result is a list of namespaces specified in the WATCHED_NAMESPACE where // each namespace is separated by comma diff --git a/internal/configuration/configuration_test.go b/internal/configuration/configuration_test.go index 51c5be0d1e..b58bbe3b8b 100644 --- a/internal/configuration/configuration_test.go +++ b/internal/configuration/configuration_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
@@ -12,11 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configuration import ( + "time" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -145,4 +150,24 @@ var _ = Describe("Annotation and label inheritance", func() { }).GetIncludePlugins()).To(ContainElements("a", "b", "c")) }) }) + + It("returns correct delay for clusters rollout", func() { + config := Data{ClustersRolloutDelay: 10} + Expect(config.GetClustersRolloutDelay()).To(Equal(10 * time.Second)) + }) + + It("returns zero as default delay for clusters rollout when not set", func() { + config := Data{} + Expect(config.GetClustersRolloutDelay()).To(BeZero()) + }) + + It("returns correct delay for instances rollout", func() { + config := Data{InstancesRolloutDelay: 5} + Expect(config.GetInstancesRolloutDelay()).To(Equal(5 * time.Second)) + }) + + It("returns zero as default delay for instances rollout when not set", func() { + config := Data{} + Expect(config.GetInstancesRolloutDelay()).To(BeZero()) + }) }) diff --git a/internal/configuration/suite_test.go b/internal/configuration/suite_test.go index 76e06703a6..d26575cd1d 100644 --- a/internal/configuration/suite_test.go +++ b/internal/configuration/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configuration diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index b1d0745f44..ef14de30c2 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -21,6 +24,7 @@ import ( "errors" "fmt" "reflect" + "slices" "time" "github.com/cloudnative-pg/machinery/pkg/log" @@ -45,11 +49,11 @@ import ( cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/backup/volumesnapshot" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" + resourcestatus "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -60,7 +64,7 @@ const backupPhase = ".status.phase" // clusterName indicates the path inside the Backup kind // where the name of the cluster is written -const clusterName = ".spec.cluster.name" +const clusterNameField = ".spec.cluster.name" // BackupReconciler reconciles a Backup object type BackupReconciler struct { @@ -71,7 +75,8 @@ type BackupReconciler struct { Recorder record.EventRecorder Plugins repository.Interface - instanceStatusClient instance.Client + instanceStatusClient remote.InstanceClient + vsr *volumesnapshot.Reconciler } // NewBackupReconciler properly initializes the BackupReconciler @@ -80,13 +85,16 @@ func NewBackupReconciler( discoveryClient *discovery.DiscoveryClient, plugins repository.Interface, ) *BackupReconciler { + cli := mgr.GetClient() + recorder := mgr.GetEventRecorderFor("cloudnative-pg-backup") return &BackupReconciler{ - Client: mgr.GetClient(), + Client: cli, DiscoveryClient: discoveryClient, Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("cloudnative-pg-backup"), - instanceStatusClient: instance.NewStatusClient(), + Recorder: recorder, + instanceStatusClient: remote.NewClient().Instance(), Plugins: plugins, + vsr: volumesnapshot.NewReconcilerBuilder(cli, recorder).Build(), } } @@ -99,7 +107,6 @@ func NewBackupReconciler( // +kubebuilder:rbac:groups="",resources=pods,verbs=get // Reconcile is the main reconciliation loop -// nolint: gocognit func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { contextLogger, ctx := log.SetupLogger(ctx) contextLogger.Debug(fmt.Sprintf("reconciling object %#q", req.NamespacedName)) @@ -117,26 +124,29 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, nil } - clusterName := backup.Spec.Cluster.Name var cluster apiv1.Cluster - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backup.Namespace, - Name: clusterName, - }, &cluster); err != nil { - if apierrs.IsNotFound(err) { - r.Recorder.Eventf(&backup, "Warning", "FindingCluster", - "Unknown cluster %v, will retry in 30 seconds", clusterName) - return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + if res, err := r.getCluster(ctx, &backup, &cluster); err != nil || res != nil { + if res != nil { + return *res, err } + return ctrl.Result{}, err + } - tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("while getting cluster %s: %w", clusterName, err)) - r.Recorder.Eventf(&backup, "Warning", "FindingCluster", - "Error 
getting cluster %v, will not retry: %s", clusterName, err.Error()) - return ctrl.Result{}, nil + ctx = cluster.SetInContext(ctx) + + if res, err := r.checkPrerequisites(ctx, backup, cluster); err != nil || res != nil { + if res != nil { + return *res, err + } + return ctrl.Result{}, err } // Load the required plugins - pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + pluginClient, err := cnpgiClient.WithPlugins( + ctx, + r.Plugins, + apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)..., + ) if err != nil { contextLogger.Error(err, "Error loading plugins, retrying") return ctrl.Result{}, err @@ -145,23 +155,9 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr pluginClient.Close(ctx) }() - ctx = setPluginClientInContext(ctx, pluginClient) - - // Plugin pre-hooks - if hookResult := preReconcilePluginHooks(ctx, &cluster, &backup); hookResult.StopReconciliation { - return hookResult.Result, hookResult.Err - } - - // This check is still needed for when the backup resource creation is forced through the webhook - if backup.Spec.Method == apiv1.BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { - message := "cannot proceed with the backup as the Kubernetes cluster has no VolumeSnapshot support" - contextLogger.Warning(message) - r.Recorder.Event(&backup, "Warning", "ClusterHasNoVolumeSnapshotCRD", message) - tryFlagBackupAsFailed(ctx, r.Client, &backup, errors.New(message)) - return ctrl.Result{}, nil - } + ctx = cnpgiClient.SetPluginClientInContext(ctx, pluginClient) - contextLogger.Debug("Found cluster for backup", "cluster", clusterName) + contextLogger.Debug("Found cluster for backup", "cluster", cluster.Name) // Store in the context the TLS configuration required communicating with the Pods ctx, err = certs.NewTLSConfigForContext( @@ -173,86 +169,36 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, err } - isRunning, err := r.isValidBackupRunning(ctx, &backup, &cluster) + // preflight checks that AREN'T formal. + // We ask questions like: "are there other backups running?", "is the current backup running?", + // "is the target instance healthy?" 
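+ // If another backup is already in progress the request is requeued, + // while a running backup whose target pod is no longer healthy is + // flagged as failed (see isCurrentBackupRunning below).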
+ if res, err := r.waitIfOtherBackupsRunning(ctx, &backup, &cluster); err != nil || !res.IsZero() { + return res, err + } + isRunning, err := r.isCurrentBackupRunning(ctx, backup, cluster) if err != nil { - contextLogger.Error(err, "while running isValidBackupRunning") return ctrl.Result{}, err } - if backup.Spec.Method == apiv1.BackupMethodBarmanObjectStore { - if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { - tryFlagBackupAsFailed(ctx, r.Client, &backup, - errors.New("no barmanObjectStore section defined on the target cluster")) - return ctrl.Result{}, nil - } - - if isRunning { - return ctrl.Result{}, nil - } - - r.Recorder.Eventf(&backup, "Normal", "Starting", - "Starting backup for cluster %v", cluster.Name) + if hookResult := preReconcilePluginHooks(ctx, &cluster, &backup); hookResult.StopReconciliation { + return hookResult.Result, hookResult.Err } - if backup.Spec.Method == apiv1.BackupMethodPlugin { - if isRunning { - return ctrl.Result{}, nil - } - - r.Recorder.Eventf(&backup, "Normal", "Starting", - "Starting backup for cluster %v", cluster.Name) + // When the instance manager is working we have to wait for it to finish + if isRunning && backup.Spec.Method.IsManagedByInstance() { + return ctrl.Result{RequeueAfter: 10 * time.Minute}, nil } - origBackup := backup.DeepCopy() - - // From now on, we differentiate backups managed by the instance manager (barman and plugins) - // from the ones managed directly by the operator (VolumeSnapshot) - - switch backup.Spec.Method { - case apiv1.BackupMethodBarmanObjectStore, apiv1.BackupMethodPlugin: - // If no good running backups are found we elect a pod for the backup - pod, err := r.getBackupTargetPod(ctx, &cluster, &backup) - if apierrs.IsNotFound(err) { - r.Recorder.Eventf(&backup, "Warning", "FindingPod", - "Couldn't find target pod %s, will retry in 30 seconds", cluster.Status.TargetPrimary) - contextLogger.Info("Couldn't find target pod, will retry in 30 seconds", "target", - cluster.Status.TargetPrimary) - backup.Status.Phase = apiv1.BackupPhasePending - return ctrl.Result{RequeueAfter: 30 * time.Second}, r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)) - } + switch { + case backup.Spec.Method.IsManagedByInstance(): + res, err := r.startBackupManagedByInstance(ctx, cluster, backup) if err != nil { - tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("while getting pod: %w", err)) - r.Recorder.Eventf(&backup, "Warning", "FindingPod", "Error getting target pod: %s", - cluster.Status.TargetPrimary) - return ctrl.Result{}, nil - } - contextLogger.Debug("Found pod for backup", "pod", pod.Name) - - if !utils.IsPodReady(*pod) { - contextLogger.Info("Backup target is not ready, will retry in 30 seconds", "target", pod.Name) - backup.Status.Phase = apiv1.BackupPhasePending - r.Recorder.Eventf(&backup, "Warning", "BackupPending", "Backup target pod not ready: %s", - cluster.Status.TargetPrimary) - return ctrl.Result{RequeueAfter: 30 * time.Second}, r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)) - } - - contextLogger.Info("Starting backup", - "cluster", cluster.Name, - "pod", pod.Name) - - // This backup can be started - if err := startInstanceManagerBackup(ctx, r.Client, &backup, pod, &cluster); err != nil { - r.Recorder.Eventf(&backup, "Warning", "Error", "Backup exit with error %v", err) - tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("encountered an error while taking the backup: %w", err)) - return ctrl.Result{}, nil + return ctrl.Result{}, err } - case 
apiv1.BackupMethodVolumeSnapshot: - if cluster.Spec.Backup == nil || cluster.Spec.Backup.VolumeSnapshot == nil { - tryFlagBackupAsFailed(ctx, r.Client, &backup, - errors.New("no volumeSnapshot section defined on the target cluster")) - return ctrl.Result{}, nil + if res != nil { + return *res, nil } - + case backup.Spec.Method.IsManagedByOperator(): res, err := r.reconcileSnapshotBackup(ctx, &cluster, &backup) if err != nil { return ctrl.Result{}, err @@ -271,6 +217,191 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return hookResult.Result, hookResult.Err } +func (r *BackupReconciler) startBackupManagedByInstance( + ctx context.Context, + cluster apiv1.Cluster, + backup apiv1.Backup, +) (*ctrl.Result, error) { + contextLogger, ctx := log.SetupLogger(ctx) + + origBackup := backup.DeepCopy() + + // If no good running backups are found we elect a pod for the backup + pod, err := r.getBackupTargetPod(ctx, &cluster, &backup) + if apierrs.IsNotFound(err) { + r.Recorder.Eventf(&backup, "Warning", "FindingPod", + "Couldn't find target pod %s, will retry in 30 seconds", cluster.Status.TargetPrimary) + contextLogger.Info("Couldn't find target pod, will retry in 30 seconds", "target", + cluster.Status.TargetPrimary) + backup.Status.Phase = apiv1.BackupPhasePending + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)); err != nil { + return nil, err + } + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + if err != nil { + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, fmt.Errorf("while getting pod: %w", err)) + r.Recorder.Eventf(&backup, "Warning", "FindingPod", "Error getting target pod: %s", + cluster.Status.TargetPrimary) + return &ctrl.Result{}, nil + } + + contextLogger.Debug("Found pod for backup", "pod", pod.Name) + + if !utils.IsPodReady(*pod) { + contextLogger.Info("Backup target is not ready, will retry in 30 seconds", "target", pod.Name) + backup.Status.Phase = apiv1.BackupPhasePending + r.Recorder.Eventf(&backup, "Warning", "BackupPending", "Backup target pod not ready: %s", + cluster.Status.TargetPrimary) + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)); err != nil { + return nil, err + } + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + contextLogger.Info("Starting backup", + "cluster", cluster.Name, + "pod", pod.Name) + + r.Recorder.Eventf(&backup, "Normal", "Starting", + "Starting backup for cluster %v", cluster.Name) + + // This backup can be started + if err := startInstanceManagerBackup(ctx, r.Client, &backup, pod, &cluster); err != nil { + r.Recorder.Eventf(&backup, "Warning", "Error", "Backup exit with error %v", err) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, + fmt.Errorf("encountered an error while taking the backup: %w", err)) + return &ctrl.Result{}, nil + } + return nil, nil +} + +func (r *BackupReconciler) isCurrentBackupRunning( + ctx context.Context, + backup apiv1.Backup, + cluster apiv1.Cluster, +) (bool, error) { + contextLogger := log.FromContext(ctx) + + isRunning, err := r.isValidBackupRunning(ctx, &backup, &cluster) + if err != nil { + contextLogger.Error(err, "while running isValidBackupRunning") + return false, err + } + if !isRunning { + return false, nil + } + + if backup.GetOnlineOrDefault(&cluster) { + if err := r.ensureTargetPodHealthy(ctx, r.Client, &backup, &cluster); err != nil { + contextLogger.Error(err, "while ensuring target pod is healthy") + + if flagErr := 
resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, nil, + fmt.Errorf("while ensuring target pod is healthy: %w", err)); flagErr != nil { + contextLogger.Error(flagErr, "while flagging backup as failed, retrying...") + return false, flagErr + } + + r.Recorder.Eventf(&backup, "Warning", "TargetPodNotHealthy", + "Error ensuring target pod is healthy: %s", err.Error()) + + return false, fmt.Errorf("interrupting backup as target pod is not healthy: %w", err) + } + } + + return true, nil +} + +// checkPrerequisites checks that the backup and cluster spec are FORMALLY valid and the kubernetes cluster supports +// the chosen backup method. +// These checks cannot be executed in the webhook given that we cannot fetch the cluster. +func (r *BackupReconciler) checkPrerequisites( + ctx context.Context, + backup apiv1.Backup, + cluster apiv1.Cluster, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + flagMissingPrerequisite := func(message string, reason string) (*ctrl.Result, error) { + contextLogger.Warning(message) + r.Recorder.Event(&backup, "Warning", reason, message) + err := resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) + return &ctrl.Result{}, err + } + if backup.Spec.Method == apiv1.BackupMethodPlugin { + if len(cluster.Spec.Plugins) == 0 { + const message = "cannot proceed with the backup as the cluster has no plugin configured" + return flagMissingPrerequisite(message, "ClusterHasNoBackupExecutorPlugin") + } + + return nil, nil + } + + if cluster.Spec.Backup == nil { + const message = "cannot proceed with the backup as the cluster has no backup section" + return flagMissingPrerequisite(message, "ClusterHasBackupConfigured") + } + + if backup.Spec.Method == apiv1.BackupMethodVolumeSnapshot { + // This check is still needed for when the backup resource creation is forced through the webhook + if !utils.HaveVolumeSnapshot() { + const message = "cannot proceed with the backup as the Kubernetes cluster has no VolumeSnapshot support" + return flagMissingPrerequisite(message, "ClusterHasNoVolumeSnapshotCRD") + } + + if cluster.Spec.Backup.VolumeSnapshot == nil { + const message = "no volumeSnapshot section defined on the target cluster" + return flagMissingPrerequisite(message, "ClusterHasNoVolumeSnapshotSection") + } + } + + if backup.Spec.Method == apiv1.BackupMethodBarmanObjectStore { + if cluster.Spec.Backup.BarmanObjectStore == nil { + const message = "no barmanObjectStore section defined on the target cluster" + return flagMissingPrerequisite(message, "ClusterHasNoBarmanSection") + } + } + + return nil, nil +} + +func (r *BackupReconciler) getCluster( + ctx context.Context, + backup *apiv1.Backup, + cluster *apiv1.Cluster, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + clusterName := backup.Spec.Cluster.Name + err := r.Get(ctx, client.ObjectKey{ + Namespace: backup.Namespace, + Name: clusterName, + }, cluster) + if err == nil { + return nil, nil + } + + if apierrs.IsNotFound(err) { + r.Recorder.Eventf(backup, "Warning", "FindingCluster", + "Unknown cluster %v, will retry in 30 seconds", clusterName) + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + contextLogger.Error(err, "error getting cluster, proceeding to flag backup as failed.") + + if flagErr := resourcestatus.FlagBackupAsFailed(ctx, r.Client, backup, nil, + fmt.Errorf("while getting cluster %s: %w", clusterName, err)); flagErr != nil { + contextLogger.Error(flagErr, "while flagging backup as failed, retrying...") + return 
nil, flagErr + } + + r.Recorder.Eventf(backup, "Warning", "FindingCluster", + "Error getting cluster %v, will not retry: %s", clusterName, err.Error()) + + return &ctrl.Result{}, nil +} + func (r *BackupReconciler) isValidBackupRunning( ctx context.Context, backup *apiv1.Backup, @@ -325,7 +456,7 @@ func (r *BackupReconciler) isValidBackupRunning( contextLogger.Info("Backup is already running on", "cluster", cluster.Name, "pod", pod.Name, - "started at", backup.Status.StartedAt) + "startedAt", backup.Status.StartedAt) // Nothing to do here return true, nil @@ -365,38 +496,26 @@ func (r *BackupReconciler) reconcileSnapshotBackup( "target", cluster.Status.TargetPrimary, ) + // TODO: shouldn't this be a failed backup? origBackup := backup.DeepCopy() backup.Status.Phase = apiv1.BackupPhasePending - err := r.Patch(ctx, backup, client.MergeFrom(origBackup)) - return &ctrl.Result{RequeueAfter: 30 * time.Second}, err + if err := r.Patch(ctx, backup, client.MergeFrom(origBackup)); err != nil { + return nil, err + } + + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil } if err != nil { - tryFlagBackupAsFailed(ctx, r.Client, backup, fmt.Errorf("while getting pod: %w", err)) + messageErr := fmt.Errorf("while getting pod: %w", err) + if flagErr := resourcestatus.FlagBackupAsFailed(ctx, r.Client, backup, cluster, messageErr); flagErr != nil { + return nil, fmt.Errorf("while flagging backup as failed: %w", flagErr) + } r.Recorder.Eventf(backup, "Warning", "FindingPod", "Error getting target pod: %s", cluster.Status.TargetPrimary) return &ctrl.Result{}, nil } - ctx = log.IntoContext(ctx, contextLogger.WithValues("targetPod", targetPod)) - - // Validate we don't have other running backups - var clusterBackups apiv1.BackupList - if err := r.List( - ctx, - &clusterBackups, - client.InNamespace(backup.GetNamespace()), - client.MatchingFields{clusterName: cluster.Name}, - ); err != nil { - return nil, err - } - - if !clusterBackups.CanExecuteBackup(backup.Name) { - contextLogger.Info( - "A backup is already in progress or waiting to be started, retrying", - "targetBackup", backup.Name, - ) - return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } + ctx = log.IntoContext(ctx, contextLogger.WithValues("targetPodName", targetPod.Name)) if !utils.PodHasContainerStatuses(*targetPod) { return nil, fmt.Errorf("target pod lacks container statuses") @@ -417,7 +536,12 @@ func (r *BackupReconciler) reconcileSnapshotBackup( } } - if errCond := conditions.Patch(ctx, r.Client, cluster, apiv1.BackupStartingCondition); errCond != nil { + if errCond := resourcestatus.PatchConditionsWithOptimisticLock( + ctx, + r.Client, + cluster, + apiv1.BackupStartingCondition, + ); errCond != nil { contextLogger.Error(errCond, "Error while updating backup condition (backup starting)") } @@ -426,26 +550,14 @@ func (r *BackupReconciler) reconcileSnapshotBackup( return nil, fmt.Errorf("cannot get PVCs: %w", err) } - reconciler := volumesnapshot. - NewReconcilerBuilder(r.Client, r.Recorder). 
- Build() - - res, err := reconciler.Reconcile(ctx, cluster, backup, targetPod, pvcs) - if isErrorRetryable(err) { - contextLogger.Error(err, "detected retryable error while executing snapshot backup, retrying...") - return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil - } + res, err := r.vsr.Reconcile(ctx, cluster, backup, targetPod, pvcs) if err != nil { // Volume Snapshot errors are not retryable, we need to set this backup as failed // and un-fence the Pod contextLogger.Error(err, "while executing snapshot backup") - // Update backup status in cluster conditions - if errCond := conditions.Patch(ctx, r.Client, cluster, apiv1.BuildClusterBackupFailedCondition(err)); errCond != nil { - contextLogger.Error(errCond, "Error while updating backup condition (backup snapshot failed)") - } - r.Recorder.Eventf(backup, "Warning", "Error", "snapshot backup failed: %v", err) - tryFlagBackupAsFailed(ctx, r.Client, backup, fmt.Errorf("can't execute snapshot backup: %w", err)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, backup, cluster, + fmt.Errorf("can't execute snapshot backup: %w", err)) return nil, volumesnapshot.EnsurePodIsUnfenced(ctx, r.Client, r.Recorder, cluster, backup, targetPod) } @@ -453,7 +565,12 @@ func (r *BackupReconciler) reconcileSnapshotBackup( return res, nil } - if err := conditions.Patch(ctx, r.Client, cluster, apiv1.BackupSucceededCondition); err != nil { + if err := resourcestatus.PatchConditionsWithOptimisticLock( + ctx, + r.Client, + cluster, + apiv1.BackupSucceededCondition, + ); err != nil { contextLogger.Error(err, "Can't update the cluster with the completed snapshot backup data") } @@ -534,11 +651,6 @@ func updateClusterWithSnapshotsBackupTimes( return nil } -// isErrorRetryable detects is an error is retryable or not -func isErrorRetryable(err error) bool { - return apierrs.IsServerTimeout(err) || apierrs.IsConflict(err) || apierrs.IsInternalError(err) -} - // getBackupTargetPod returns the pod that should run the backup according to the current // cluster's target policy func (r *BackupReconciler) getBackupTargetPod(ctx context.Context, @@ -628,15 +740,10 @@ func startInstanceManagerBackup( }) if err != nil { log.FromContext(ctx).Error(err, "executing backup", "stdout", stdout, "stderr", stderr) - status.SetAsFailed(fmt.Errorf("can't execute backup: %w", err)) - status.CommandError = stderr - status.CommandError = stdout - - // Update backup status in cluster conditions - if errCond := conditions.Patch(ctx, client, cluster, apiv1.BuildClusterBackupFailedCondition(err)); errCond != nil { - log.FromContext(ctx).Error(errCond, "Error while updating backup condition (backup failed)") + setCommandErr := func(backup *apiv1.Backup) { + backup.Status.CommandError = fmt.Sprintf("with stderr: %s, with stdout: %s", stderr, stdout) } - return postgres.PatchBackupStatusAndRetry(ctx, client, backup) + return resourcestatus.FlagBackupAsFailed(ctx, client, backup, cluster, err, setCommandErr) } return nil @@ -656,7 +763,7 @@ func (r *BackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manage if err := mgr.GetFieldIndexer().IndexField( ctx, &apiv1.Backup{}, - clusterName, func(rawObj client.Object) []string { + clusterNameField, func(rawObj client.Object) []string { return []string{rawObj.(*apiv1.Backup).Spec.Cluster.Name} }); err != nil { return err @@ -664,6 +771,7 @@ func (r *BackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manage controllerBuilder := ctrl.NewControllerManagedBy(mgr). For(&apiv1.Backup{}). + Named("backup"). 
Watches(&apiv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.mapClustersToBackup()), builder.WithPredicates(clustersWithBackupPredicate), @@ -681,17 +789,76 @@ func (r *BackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manage return controllerBuilder.Complete(r) } -func tryFlagBackupAsFailed( +func (r *BackupReconciler) ensureTargetPodHealthy( ctx context.Context, cli client.Client, backup *apiv1.Backup, - err error, -) { + cluster *apiv1.Cluster, +) error { + if backup.Status.InstanceID == nil || len(backup.Status.InstanceID.PodName) == 0 { + return fmt.Errorf("no target pod assigned for backup %s", backup.Name) + } + + podName := backup.Status.InstanceID.PodName + + var pod corev1.Pod + if err := cli.Get(ctx, client.ObjectKey{ + Namespace: backup.Namespace, + Name: podName, + }, &pod); err != nil { + if apierrs.IsNotFound(err) { + return fmt.Errorf("target pod %s not found in namespace %s for backup %s", podName, backup.Namespace, backup.Name) + } + return fmt.Errorf( + "error getting target pod %s in namespace %s for backup %s: %w", podName, backup.Namespace, + backup.Name, + err, + ) + } + + // if the pod is present we evaluate its health status + healthyPods, ok := cluster.Status.InstancesStatus[apiv1.PodHealthy] + if !ok { + return fmt.Errorf("no status found for target pod %s in cluster %s", podName, cluster.Name) + } + + if !slices.Contains(healthyPods, podName) { + return fmt.Errorf("target pod %s is not healthy for backup in cluster %s", podName, cluster.Name) + } + + contextLogger := log.FromContext(ctx) + contextLogger.Debug("Target pod is healthy for backup", + "podName", podName, + "backupName", backup.Name, + ) + return nil +} + +func (r *BackupReconciler) waitIfOtherBackupsRunning( + ctx context.Context, + backup *apiv1.Backup, + cluster *apiv1.Cluster, +) (ctrl.Result, error) { contextLogger := log.FromContext(ctx) - origBackup := backup.DeepCopy() - backup.Status.SetAsFailed(err) - if err := cli.Status().Patch(ctx, backup, client.MergeFrom(origBackup)); err != nil { - contextLogger.Error(err, "while flagging backup as failed") + // Validate we don't have other running backups + var clusterBackups apiv1.BackupList + if err := r.List( + ctx, + &clusterBackups, + client.InNamespace(backup.GetNamespace()), + client.MatchingFields{clusterNameField: cluster.Name}, + ); err != nil { + return ctrl.Result{}, err + } + + if !clusterBackups.CanExecuteBackup(backup.Name) { + contextLogger.Info( + "A backup is already in progress or waiting to be started, retrying", + "targetBackup", backup.Name, + ) + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } + + return ctrl.Result{}, nil } diff --git a/internal/controller/backup_controller_test.go b/internal/controller/backup_controller_test.go index 6890e2ad46..c228bb573a 100644 --- a/internal/controller/backup_controller_test.go +++ b/internal/controller/backup_controller_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -23,6 +26,7 @@ import ( volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -333,9 +337,13 @@ var _ = Describe("update snapshot backup metadata", func() { }) It("should update cluster with no metadata", func(ctx context.Context) { + //nolint:staticcheck Expect(cluster.Status.FirstRecoverabilityPoint).To(BeEmpty()) + //nolint:staticcheck Expect(cluster.Status.FirstRecoverabilityPointByMethod).To(BeEmpty()) + //nolint:staticcheck Expect(cluster.Status.LastSuccessfulBackup).To(BeEmpty()) + //nolint:staticcheck Expect(cluster.Status.LastSuccessfulBackupByMethod).To(BeEmpty()) fakeClient := fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). WithObjects(cluster). @@ -351,24 +359,34 @@ var _ = Describe("update snapshot backup metadata", func() { Name: cluster.Name, }, &updatedCluster) Expect(err).ToNot(HaveOccurred()) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPoint).To(Equal(twoHoursAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod). ToNot(HaveKey(apiv1.BackupMethodBarmanObjectStore)) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(twoHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackup).To(Equal(oneHourAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod). ToNot(HaveKey(apiv1.BackupMethodBarmanObjectStore)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(oneHourAgo)) }) It("should consider other methods when update the metadata", func(ctx context.Context) { + //nolint:staticcheck cluster.Status.FirstRecoverabilityPoint = threeHoursAgo.Format(time.RFC3339) + //nolint:staticcheck cluster.Status.FirstRecoverabilityPointByMethod = map[apiv1.BackupMethod]metav1.Time{ apiv1.BackupMethodBarmanObjectStore: threeHoursAgo, } + //nolint:staticcheck cluster.Status.LastSuccessfulBackup = now.Format(time.RFC3339) + //nolint:staticcheck cluster.Status.LastSuccessfulBackupByMethod = map[apiv1.BackupMethod]metav1.Time{ apiv1.BackupMethodBarmanObjectStore: now, } @@ -386,25 +404,35 @@ var _ = Describe("update snapshot backup metadata", func() { Name: cluster.Name, }, &updatedCluster) Expect(err).ToNot(HaveOccurred()) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPoint).To(Equal(threeHoursAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodBarmanObjectStore]). To(Equal(threeHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(twoHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackup).To(Equal(now.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodBarmanObjectStore]). To(Equal(now)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodVolumeSnapshot]). 
To(Equal(oneHourAgo)) }) It("should override other method metadata when appropriate", func(ctx context.Context) { + //nolint:staticcheck cluster.Status.FirstRecoverabilityPoint = oneHourAgo.Format(time.RFC3339) + //nolint:staticcheck cluster.Status.FirstRecoverabilityPointByMethod = map[apiv1.BackupMethod]metav1.Time{ apiv1.BackupMethodBarmanObjectStore: oneHourAgo, apiv1.BackupMethodVolumeSnapshot: now, } + //nolint:staticcheck cluster.Status.LastSuccessfulBackup = oneHourAgo.Format(time.RFC3339) + //nolint:staticcheck cluster.Status.LastSuccessfulBackupByMethod = map[apiv1.BackupMethod]metav1.Time{ apiv1.BackupMethodBarmanObjectStore: twoHoursAgo, apiv1.BackupMethodVolumeSnapshot: threeHoursAgo, @@ -423,15 +451,90 @@ var _ = Describe("update snapshot backup metadata", func() { Name: cluster.Name, }, &updatedCluster) Expect(err).ToNot(HaveOccurred()) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPoint).To(Equal(twoHoursAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodBarmanObjectStore]). To(Equal(oneHourAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(twoHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackup).To(Equal(oneHourAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodBarmanObjectStore]). To(Equal(twoHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(oneHourAgo)) }) }) + +var _ = Describe("checkPrerequisites for plugin backups", func() { + var env *testingEnvironment + BeforeEach(func() { env = buildTestEnvironment() }) + + It("allows plugin backups without cluster.spec.backup when a plugin is configured", func(ctx context.Context) { + ns := newFakeNamespace(env.client) + + cluster := newFakeCNPGCluster(env.client, ns, func(c *apiv1.Cluster) { + c.Spec.Backup = nil + c.Spec.Plugins = []apiv1.PluginConfiguration{{ + Name: "test", + Enabled: ptr.To(true), + Parameters: map[string]string{"key": "value"}, + }} + }) + + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{Name: "test-plugin-backup", Namespace: ns}, + Spec: apiv1.BackupSpec{ + Cluster: apiv1.LocalObjectReference{Name: cluster.Name}, + Method: apiv1.BackupMethodPlugin, + }, + } + // Create the backup so that status updates in prerequisites can patch it if needed + expectErr := env.client.Create(ctx, backup) + Expect(expectErr).ToNot(HaveOccurred()) + + res, err := env.backupReconciler.checkPrerequisites(ctx, *backup, *cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(res).To(BeNil()) + + // Ensure backup was not marked as failed + var stored apiv1.Backup + expectErr = env.client.Get(ctx, client.ObjectKeyFromObject(backup), &stored) + Expect(expectErr).ToNot(HaveOccurred()) + Expect(stored.Status.Phase).To(BeEmpty()) + }) + + It("fails plugin backups when no plugin is configured on the cluster", func(ctx context.Context) { + ns := newFakeNamespace(env.client) + + cluster := newFakeCNPGCluster(env.client, ns, func(c *apiv1.Cluster) { + c.Spec.Backup = nil + c.Spec.Plugins = nil + }) + + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{Name: "test-plugin-backup-missing", Namespace: ns}, + Spec: apiv1.BackupSpec{ + Cluster: apiv1.LocalObjectReference{Name: cluster.Name}, + Method: apiv1.BackupMethodPlugin, + }, + } + expectErr := 
env.client.Create(ctx, backup) + Expect(expectErr).ToNot(HaveOccurred()) + + res, err := env.backupReconciler.checkPrerequisites(ctx, *backup, *cluster) + // We expect the reconciler to flag failure and return a non-nil result without bubbling an error + Expect(err).ToNot(HaveOccurred()) + Expect(res).ToNot(BeNil()) + + var stored apiv1.Backup + expectErr = env.client.Get(ctx, client.ObjectKeyFromObject(backup), &stored) + Expect(expectErr).ToNot(HaveOccurred()) + Expect(stored.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseFailed)) + Expect(stored.Status.Method).To(BeEquivalentTo(apiv1.BackupMethodPlugin)) + }) +}) diff --git a/internal/controller/backup_predicates.go b/internal/controller/backup_predicates.go index 5fcfd39dd9..147af6fb89 100644 --- a/internal/controller/backup_predicates.go +++ b/internal/controller/backup_predicates.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -70,7 +73,7 @@ func (r *BackupReconciler) mapClustersToBackup() handler.MapFunc { return nil } var backups apiv1.BackupList - err := r.Client.List(ctx, &backups, + err := r.List(ctx, &backups, client.MatchingFields{ backupPhase: apiv1.BackupPhaseRunning, }, diff --git a/internal/controller/backup_predicates_test.go b/internal/controller/backup_predicates_test.go index e7715ab37c..49ccd4edea 100644 --- a/internal/controller/backup_predicates_test.go +++ b/internal/controller/backup_predicates_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_cleanup.go b/internal/controller/cluster_cleanup.go index ccc7f5dec0..5370654e76 100644 --- a/internal/controller/cluster_cleanup.go +++ b/internal/controller/cluster_cleanup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_cleanup_test.go b/internal/controller/cluster_cleanup_test.go index 23e05d836a..c6162ce9c7 100644 --- a/internal/controller/cluster_cleanup_test.go +++ b/internal/controller/cluster_cleanup_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 4a446d21ea..d316580cbf 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the controller of the CRD @@ -25,6 +28,7 @@ import ( "time" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -37,6 +41,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -47,13 +52,15 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/operatorclient" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + rolloutManager "github.com/cloudnative-pg/cloudnative-pg/internal/controller/rollout" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" instanceReconciler "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance" + "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/majorupgrade" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -67,7 +74,12 @@ const ( imageCatalogKey = ".spec.imageCatalog.name" ) -var apiGVString = 
apiv1.GroupVersion.String() +var apiSGVString = apiv1.SchemeGroupVersion.String() + +// errOldPrimaryDetected occurs when a primary Pod loses connectivity with the +// API server and, upon reconnection, attempts to retain its previous primary +// role. +var errOldPrimaryDetected = errors.New("old primary detected") // ClusterReconciler reconciles a Cluster objects type ClusterReconciler struct { @@ -76,8 +88,11 @@ type ClusterReconciler struct { DiscoveryClient discovery.DiscoveryInterface Scheme *runtime.Scheme Recorder record.EventRecorder - InstanceClient instance.Client + InstanceClient remote.InstanceClient Plugins repository.Interface + + drainTaints []string + rolloutManager *rolloutManager.Manager } // NewClusterReconciler creates a new ClusterReconciler initializing it @@ -85,14 +100,20 @@ func NewClusterReconciler( mgr manager.Manager, discoveryClient *discovery.DiscoveryClient, plugins repository.Interface, + drainTaints []string, ) *ClusterReconciler { return &ClusterReconciler{ - InstanceClient: instance.NewStatusClient(), + InstanceClient: remote.NewClient().Instance(), DiscoveryClient: discoveryClient, Client: operatorclient.NewExtendedClient(mgr.GetClient()), Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("cloudnative-pg"), Plugins: plugins, + rolloutManager: rolloutManager.New( + configuration.Current.GetClustersRolloutDelay(), + configuration.Current.GetInstancesRolloutDelay(), + ), + drainTaints: drainTaints, } } @@ -124,6 +145,8 @@ var ErrNextLoop = utils.ErrNextLoop // +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshots,verbs=get;create;watch;list;patch // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=imagecatalogs,verbs=get;watch;list // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusterimagecatalogs,verbs=get;watch;list +// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=failoverquorums,verbs=create;get;watch;delete;list +// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=failoverquorums/status,verbs=get;patch;update;watch // Reconcile is the operator reconcile loop func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -148,13 +171,30 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct "namespace", req.Namespace, ) } + if err := r.notifyDeletionToOwnedResources(ctx, req.NamespacedName); err != nil { + contextLogger.Error( + err, + "error while deleting finalizers of objects on the cluster", + "clusterName", req.Name, + "namespace", req.Namespace, + ) + } return ctrl.Result{}, err } ctx = cluster.SetInContext(ctx) - // Load the required plugins - pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + // Load the plugins required to bootstrap and reconcile this cluster + enabledPluginNames := apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) + enabledPluginNames = append( + enabledPluginNames, + apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)..., + ) + + pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second) + defer cancelPluginLoading() + + pluginClient, err := cnpgiClient.WithPlugins(pluginLoadingContext, r.Plugins, enabledPluginNames...) 
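An editorial aside on the hunk above: plugin discovery is now bounded by a dedicated five-second context, so a hung plugin endpoint surfaces as a timeout error instead of stalling the whole reconciliation. A minimal, self-contained sketch of that pattern, assuming a hypothetical `loadPlugins` helper in place of `cnpgiClient.WithPlugins`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// loadPlugins stands in for cnpgiClient.WithPlugins: it dials every
// requested plugin and fails if any of them cannot be reached.
// The name and signature are illustrative, not the operator's API.
func loadPlugins(ctx context.Context, names []string) error {
	for _, name := range names {
		select {
		case <-ctx.Done():
			// A hung plugin endpoint becomes a context error
			// instead of blocking the reconciliation loop forever.
			return fmt.Errorf("loading plugin %q: %w", name, ctx.Err())
		case <-time.After(10 * time.Millisecond): // simulated dial
		}
	}
	return nil
}

func main() {
	// Bound only the plugin-loading phase, mirroring the 5-second
	// budget in the reconciler; the parent context used by the rest
	// of the reconciliation is left untouched.
	loadCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := loadPlugins(loadCtx, []string{"barman-cloud.cloudnative-pg.io"}); err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			fmt.Println("plugin loading timed out:", err)
			return
		}
		fmt.Println("plugin loading failed:", err)
		return
	}
	fmt.Println("all plugins loaded")
}
```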
if err != nil { var errUnknownPlugin *repository.ErrUnknownPlugin if errors.As(err, &errUnknownPlugin) { @@ -170,6 +210,15 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct ) } + if regErr := r.RegisterPhase( + ctx, + cluster, + apiv1.PhaseFailurePlugin, + fmt.Sprintf("Error while discovering plugins: %s", err.Error()), + ); regErr != nil { + contextLogger.Error(regErr, "unable to register phase", "outerErr", err.Error()) + } + contextLogger.Error(err, "Error loading plugins, retrying") return ctrl.Result{}, err } @@ -177,7 +226,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct pluginClient.Close(ctx) }() - ctx = setPluginClientInContext(ctx, pluginClient) + ctx = cnpgiClient.SetPluginClientInContext(ctx, pluginClient) // Run the inner reconcile loop. Translate any ErrNextLoop to an errorless return result, err := r.reconcile(ctx, cluster) @@ -187,6 +236,21 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct if errors.Is(err, utils.ErrTerminateLoop) { return ctrl.Result{}, nil } + + // This code assumes that we always end the reconciliation loop if we encounter an error. + // If that assumption is false, this code could overwrite an error phase. + if cnpgiClient.ContainsPluginError(err) { + if regErr := r.RegisterPhase( + ctx, + cluster, + apiv1.PhaseFailurePlugin, + fmt.Sprintf("Encountered an error while interacting with plugins: %s", err.Error()), + ); regErr != nil { + contextLogger.Error(regErr, "unable to register phase", "outerErr", err.Error()) + } + return ctrl.Result{RequeueAfter: 15 * time.Second}, nil + } + if err != nil { return ctrl.Result{}, err } @@ -278,6 +342,8 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste // Calls pre-reconcile hooks if hookResult := preReconcilePluginHooks(ctx, cluster, cluster); hookResult.StopReconciliation { + contextLogger.Info("Pre-reconcile hook stopped the reconciliation loop", + "hookResult", hookResult) return hookResult.Result, hookResult.Err } @@ -294,10 +360,12 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste if cluster.ShouldPromoteFromReplicaCluster() { if !(cluster.Status.Phase == apiv1.PhaseReplicaClusterPromotion || cluster.Status.Phase == apiv1.PhaseUnrecoverable) { - return ctrl.Result{RequeueAfter: 1 * time.Second}, r.RegisterPhase(ctx, + if err := r.RegisterPhase(ctx, cluster, apiv1.PhaseReplicaClusterPromotion, - "Replica cluster promotion in progress") + "Replica cluster promotion in progress"); err != nil { + return ctrl.Result{}, err + } } return ctrl.Result{RequeueAfter: 1 * time.Second}, nil } @@ -325,6 +393,27 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste return ctrl.Result{}, fmt.Errorf("cannot update the instances status on the cluster: %w", err) } + // If a Pod loses connectivity, the operator will fail over, but the faulty + // Pod will not receive the change of its role from primary to replica. + // + // When connectivity resumes, the operator will find two primaries: + // the previously failing one and the newly promoted primary. The operator + // should wait for each Pod to learn its current role through auto-healing + // before proceeding. Without this safety + // measure, the operator would just fail back to the first primary of + // the list.
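To make the scenario above concrete before the check itself appears, here is a minimal sketch using a pared-down stand-in for the operator's `PostgresqlStatusList` and its `PrimaryNames()` helper; the types here are illustrative only:

```go
package main

import "fmt"

// instanceStatus is a pared-down stand-in for the operator's
// PostgresqlStatus: only the fields needed to illustrate the check.
type instanceStatus struct {
	PodName   string
	IsPrimary bool
}

// primaryNames mirrors instancesStatus.PrimaryNames(): it returns every
// instance that still claims the primary role.
func primaryNames(items []instanceStatus) []string {
	var names []string
	for _, item := range items {
		if item.IsPrimary {
			names = append(names, item.PodName)
		}
	}
	return names
}

func main() {
	// A rejoining old primary plus the newly promoted one: the safe
	// reaction is to requeue and wait, never to "fail back" to the
	// first name in the list.
	status := []instanceStatus{
		{PodName: "cluster-1", IsPrimary: true},  // old primary, just reconnected
		{PodName: "cluster-2", IsPrimary: true},  // promoted during the outage
		{PodName: "cluster-3", IsPrimary: false},
	}

	if primaries := primaryNames(status); len(primaries) > 1 {
		fmt.Println("old primary detected, requeueing:", primaries)
		return
	}
	fmt.Println("topology is consistent")
}
```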
+ if primaryNames := instancesStatus.PrimaryNames(); len(primaryNames) > 1 { + contextLogger.Error( + errOldPrimaryDetected, + "An old primary pod has been detected. Awaiting its recognition of the new role", + "primaryNames", primaryNames, + ) + instancesStatus.LogStatus(ctx) + return ctrl.Result{ + RequeueAfter: 5 * time.Second, + }, nil + } + if err := persistentvolumeclaim.ReconcileMetadata( ctx, r.Client, @@ -357,21 +446,27 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste contextLogger.Warning( "Failed to extract instance status from ready instances. Attempting to requeue...", ) - registerPhaseErr := r.RegisterPhase( + if err := r.RegisterPhase( ctx, cluster, "Instance Status Extraction Error: HTTP communication issue", "Communication issue detected: The operator was unable to receive the status from all the ready instances. "+ "This may be due to network restrictions such as NetworkPolicy and/or any other network plugin setting. "+ "Please verify your network configuration.", - ) - return ctrl.Result{RequeueAfter: 10 * time.Second}, registerPhaseErr + ); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } if res, err := r.ensureNoFailoverOnFullDisk(ctx, cluster, instancesStatus); err != nil || !res.IsZero() { return res, err } + if res, err := r.requireWALArchivingPluginOrDelete(ctx, instancesStatus); err != nil || !res.IsZero() { + return res, err + } + if res, err := replicaclusterswitch.Reconcile( ctx, r.Client, cluster, r.InstanceClient, instancesStatus); res != nil || err != nil { if res != nil { @@ -406,7 +501,7 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste // we need to wait for it to be refreshed contextLogger.Info( "Waiting for the Kubelet to refresh the readiness probe", - "mostAdvancedInstanceName", mostAdvancedInstance.Node, + "mostAdvancedInstanceName", mostAdvancedInstance.Pod.Name, "hasHTTPStatus", hasHTTPStatus, "isPodReady", isPodReady) return ctrl.Result{RequeueAfter: 1 * time.Second}, nil @@ -461,10 +556,12 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste // Calls post-reconcile hooks if hookResult := postReconcilePluginHooks(ctx, cluster, cluster); hookResult.Err != nil || !hookResult.Result.IsZero() { + contextLogger.Info("Post-reconcile hook stopped the reconciliation loop", + "hookResult", hookResult) return hookResult.Result, hookResult.Err } - return setStatusPluginHook(ctx, r.Client, getPluginClientFromContext(ctx), cluster) + return setStatusPluginHook(ctx, r.Client, cnpgiClient.GetPluginClientFromContext(ctx), cluster) } func (r *ClusterReconciler) ensureNoFailoverOnFullDisk( @@ -491,13 +588,39 @@ func (r *ClusterReconciler) ensureNoFailoverOnFullDisk( reason := "Insufficient disk space detected in one or more pods is preventing PostgreSQL from running." + "Please verify your storage settings. 
Further information inside .status.instancesReportedState" - registerPhaseErr := r.RegisterPhase( + if err := r.RegisterPhase( ctx, cluster, "Not enough disk space", reason, - ) - return ctrl.Result{RequeueAfter: 10 * time.Second}, registerPhaseErr + ); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +} + +func (r *ClusterReconciler) requireWALArchivingPluginOrDelete( + ctx context.Context, + instances postgres.PostgresqlStatusList, +) (ctrl.Result, error) { + contextLogger := log.FromContext(ctx).WithName("require_wal_archiving_plugin_delete") + + for _, state := range instances.Items { + if isTerminatedBecauseOfMissingWALArchivePlugin(state.Pod) { + contextLogger.Warning( + "Detected instance manager initialization procedure that failed "+ + "because the required WAL archive plugin is missing. Deleting it to trigger rollout", + "targetPod", state.Pod.Name) + if err := r.Delete(ctx, state.Pod); err != nil { + contextLogger.Error(err, "Cannot delete the pod", "pod", state.Pod.Name) + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + } + + return ctrl.Result{}, nil } func (r *ClusterReconciler) handleSwitchover( @@ -667,18 +790,35 @@ func (r *ClusterReconciler) reconcileResources( cluster, resources.instances.Items, resources.pvcs.Items, - ); !res.IsZero() || err != nil { + ); err != nil || !res.IsZero() { return res, err } + // In-place Postgres major version upgrades + if result, err := majorupgrade.Reconcile( + ctx, + r.Client, + cluster, + resources.instances.Items, + resources.pvcs.Items, + resources.jobs.Items, + ); err != nil { + return ctrl.Result{}, fmt.Errorf("cannot reconcile in-place major version upgrades: %w", err) + } else if result != nil { + return *result, err + } + // Reconcile Pods if res, err := r.reconcilePods(ctx, cluster, resources, instancesStatus); !res.IsZero() || err != nil { return res, err } if len(resources.instances.Items) > 0 && resources.noInstanceIsAlive() { - return ctrl.Result{RequeueAfter: 1 * time.Second}, r.RegisterPhase(ctx, cluster, apiv1.PhaseUnrecoverable, - "No pods are active, the cluster needs manual intervention ") + if err := r.RegisterPhase(ctx, cluster, apiv1.PhaseUnrecoverable, + "No pods are active, the cluster needs manual intervention "); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil } // If we still need more instances, we need to wait before setting healthy status @@ -764,8 +904,15 @@ func (r *ClusterReconciler) processUnschedulableInstances( } if podRollout := isPodNeedingRollout(ctx, pod, cluster); podRollout.required { - return &ctrl.Result{RequeueAfter: 1 * time.Second}, - r.upgradePod(ctx, cluster, pod, fmt.Sprintf("recreating unschedulable pod: %s", podRollout.reason)) + if err := r.upgradePod( + ctx, + cluster, + pod, + fmt.Sprintf("recreating unschedulable pod: %s", podRollout.reason), + ); err != nil { + return nil, err + } + return &ctrl.Result{RequeueAfter: 1 * time.Second}, nil } if !cluster.IsNodeMaintenanceWindowInProgress() || cluster.IsReusePVCEnabled() { @@ -805,7 +952,8 @@ func (r *ClusterReconciler) processUnschedulableInstances( func (r *ClusterReconciler) reconcilePods( ctx context.Context, cluster *apiv1.Cluster, - resources *managedResources, instancesStatus postgres.PostgresqlStatusList, + resources *managedResources, + instancesStatus postgres.PostgresqlStatusList, ) (ctrl.Result, error) { contextLogger := log.FromContext(ctx) @@ -862,17 +1010,53 @@ func 
(r *ClusterReconciler) reconcilePods( } } - // Stop acting here if there are non-ready Pods + // Requeue here if there are non-ready Pods. // In the rest of the function we are sure that // cluster.Status.Instances == cluster.Spec.Instances and // we don't need to modify the cluster topology if cluster.Status.ReadyInstances != cluster.Status.Instances || - cluster.Status.ReadyInstances != len(instancesStatus.Items) || - !instancesStatus.IsComplete() { + cluster.Status.ReadyInstances != len(instancesStatus.Items) { contextLogger.Debug("Waiting for Pods to be ready") return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop } + // If there is a Pod that doesn't report its HTTP status, + // we wait until the Pod gets marked as non-ready or until we're + // able to connect to it. + if !instancesStatus.IsComplete() { + podsReportingStatus := stringset.New() + podsNotReportingStatus := make(map[string]string) + for i := range instancesStatus.Items { + podName := instancesStatus.Items[i].Pod.Name + if instancesStatus.Items[i].Error != nil { + podsNotReportingStatus[podName] = instancesStatus.Items[i].Error.Error() + } else { + podsReportingStatus.Put(podName) + } + } + + contextLogger.Info( + "Waiting for Pods to report HTTP status", + "podsReportingStatus", podsReportingStatus.ToSortedList(), + "podsNotReportingStatus", podsNotReportingStatus, + ) + return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop + } + + report := instancesStatus.GetConfigurationReport() + + // If any pod is not reporting its configuration (i.e., uniform == nil), + // proceed with a rolling update to upgrade the instance manager + // to a version that reports the configuration status. + // If all pods report their configuration, wait until all instances + // report the same configuration. + if uniform := report.IsUniform(); uniform != nil && !*uniform { + contextLogger.Debug( + "Waiting for all Pods to have the same PostgreSQL configuration", + "configurationReport", report) + return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop + } + return r.handleRollingUpdate(ctx, cluster, instancesStatus) } @@ -924,6 +1108,21 @@ func (r *ClusterReconciler) handleRollingUpdate( "not connected via streaming replication, waiting for 5 seconds", ) return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + case errors.Is(err, errRolloutDelayed): + contextLogger.Warning( + "A Pod needs to be rolled out, but the rollout is being delayed", + ) + if err := r.RegisterPhase( + ctx, + cluster, + apiv1.PhaseUpgradeDelayed, + "The cluster needs to be updated, but the operator is configured to delay "+ + "the operation", + ); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: 15 * time.Second}, nil case err != nil: return ctrl.Result{}, err case done: @@ -959,14 +1158,18 @@ func (r *ClusterReconciler) handleRollingUpdate( } // SetupWithManager creates a ClusterReconciler -func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { +func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, maxConcurrentReconciles int) error { err := r.createFieldIndexes(ctx, mgr) if err != nil { return err } return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{ + MaxConcurrentReconciles: maxConcurrentReconciles, + }). For(&apiv1.Cluster{}). + Named("cluster"). Owns(&corev1.Pod{}). Owns(&batchv1.Job{}). Owns(&corev1.Service{}). 
@@ -989,7 +1192,7 @@ func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag Watches( &corev1.Node{}, handler.EnqueueRequestsFromMapFunc(r.mapNodeToClusters()), - builder.WithPredicates(nodesPredicate), + builder.WithPredicates(r.nodesPredicate()), ). Watches( &apiv1.ImageCatalog{}, @@ -1004,6 +1207,19 @@ func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag Complete(r) } +// jobOwnerIndexFunc maps a job definition to its owning cluster and +// is used as an index function to speed up the lookup of jobs +// created by the operator. +func jobOwnerIndexFunc(rawObj client.Object) []string { + job := rawObj.(*batchv1.Job) + + if ownerName, ok := IsOwnedByCluster(job); ok { + return []string{ownerName} + } + + return nil +} + // createFieldIndexes creates the indexes needed by this controller func (r *ClusterReconciler) createFieldIndexes(ctx context.Context, mgr ctrl.Manager) error { // Create a new indexed field on Pods. This field will be used to easily @@ -1108,15 +1324,7 @@ func (r *ClusterReconciler) createFieldIndexes(ctx context.Context, mgr ctrl.Man return mgr.GetFieldIndexer().IndexField( ctx, &batchv1.Job{}, - jobOwnerKey, func(rawObj client.Object) []string { - job := rawObj.(*batchv1.Job) - - if ownerName, ok := IsOwnedByCluster(job); ok { - return []string{ownerName} - } - - return nil - }) + jobOwnerKey, jobOwnerIndexFunc) } // IsOwnedByCluster checks that an object is owned by a Cluster and returns @@ -1131,7 +1339,7 @@ func IsOwnedByCluster(obj client.Object) (string, bool) { return "", false } - if owner.APIVersion != apiGVString { + if owner.APIVersion != apiSGVString { return "", false } @@ -1277,11 +1485,13 @@ func filterClustersUsingConfigMap( func (r *ClusterReconciler) mapNodeToClusters() handler.MapFunc { return func(ctx context.Context, obj client.Object) []reconcile.Request { node := obj.(*corev1.Node) + // exit if the node is schedulable (e.g. not cordoned) // could be expanded here with other conditions (e.g. pressure or issues) - if !node.Spec.Unschedulable { + if !isNodeUnschedulableOrBeingDrained(node, r.drainTaints) { return nil } + var childPods corev1.PodList // get all the pods handled by the operator on that node err := r.List(ctx, &childPods, @@ -1351,77 +1561,3 @@ func (r *ClusterReconciler) markPVCReadyForCompletedJobs( return nil } - -// TODO: only required to cleanup custom monitoring queries configmaps from older versions (v1.10 and v1.11) -// that could have been copied with the source configmap name instead of the new default one. -// Should be removed in future releases. 
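A side note on the `jobOwnerIndexFunc` extraction above: moving the closure out of `createFieldIndexes` makes the owner index testable on its own. The sketch below reproduces the pattern against controller-runtime's fake client; the `jobOwnerKey` value and `ownerIndexFunc` are simplified stand-ins for the operator's internals (the real index function also checks the owner's API version and kind):

```go
package main

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// jobOwnerKey is an illustrative index name, not necessarily the
// constant the operator uses.
const jobOwnerKey = ".metadata.controller"

// ownerIndexFunc keys each Job by the name of its controller owner,
// mirroring what jobOwnerIndexFunc does for Cluster-owned Jobs.
func ownerIndexFunc(rawObj client.Object) []string {
	job := rawObj.(*batchv1.Job)
	owner := metav1.GetControllerOf(job)
	if owner == nil {
		return nil
	}
	return []string{owner.Name}
}

func main() {
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "cluster-example-1-initdb",
			Namespace: "default",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "postgresql.cnpg.io/v1",
				Kind:       "Cluster",
				Name:       "cluster-example",
				UID:        "0000-demo",
				Controller: ptr.To(true),
			}},
		},
	}

	cli := fake.NewClientBuilder().
		WithScheme(scheme.Scheme).
		WithIndex(&batchv1.Job{}, jobOwnerKey, ownerIndexFunc).
		WithObjects(job).
		Build()

	// With the index registered, the lookup is a filtered List call
	// instead of a scan over every Job in the namespace.
	var jobs batchv1.JobList
	if err := cli.List(context.Background(), &jobs,
		client.InNamespace("default"),
		client.MatchingFields{jobOwnerKey: "cluster-example"},
	); err != nil {
		panic(err)
	}
	fmt.Println("jobs owned by cluster-example:", len(jobs.Items))
}
```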
-func (r *ClusterReconciler) deleteOldCustomQueriesConfigmap(ctx context.Context, cluster *apiv1.Cluster) { - contextLogger := log.FromContext(ctx) - - // if the cluster didn't have default monitoring queries, do nothing - if cluster.Spec.Monitoring.AreDefaultQueriesDisabled() || - configuration.Current.MonitoringQueriesConfigmap == "" || - configuration.Current.MonitoringQueriesConfigmap == apiv1.DefaultMonitoringConfigMapName { - return - } - - // otherwise, remove the old default monitoring queries configmap from the cluster and delete it, if present - oldCmID := -1 - for idx, cm := range cluster.Spec.Monitoring.CustomQueriesConfigMap { - if cm.Name == configuration.Current.MonitoringQueriesConfigmap && - cm.Key == apiv1.DefaultMonitoringKey { - oldCmID = idx - break - } - } - - // if we didn't find it, do nothing - if oldCmID < 0 { - return - } - - // if we found it, we are going to get it and check it was actually created by the operator or was already deleted - var oldCm corev1.ConfigMap - err := r.Get(ctx, types.NamespacedName{ - Name: configuration.Current.MonitoringQueriesConfigmap, - Namespace: cluster.Namespace, - }, &oldCm) - // if we found it, we check the annotation the operator should have set to be sure it was created by us - if err == nil { // nolint:nestif - // if it was, we delete it and proceed to remove it from the cluster monitoring spec - if _, ok := oldCm.Annotations[utils.OperatorVersionAnnotationName]; ok { - err = r.Delete(ctx, &oldCm) - // if there is any error except the cm was already deleted, we return - if err != nil && !apierrs.IsNotFound(err) { - contextLogger.Warning("error while deleting old default monitoring custom queries configmap", - "err", err, - "configmap", configuration.Current.MonitoringQueriesConfigmap) - return - } - } else { - // it exists, but it's not handled by the operator, we do nothing - contextLogger.Warning("A configmap with the same name as the old default monitoring queries "+ - "configmap exists, but doesn't have the required annotation, so it won't be deleted, "+ - "nor removed from the cluster monitoring spec", - "configmap", oldCm.Name) - return - } - } else if !apierrs.IsNotFound(err) { - // if there is any error except the cm was already deleted, we return - contextLogger.Warning("error while getting old default monitoring custom queries configmap", - "err", err, - "configmap", configuration.Current.MonitoringQueriesConfigmap) - return - } - // both if it exists or not, if we are here we should delete it from the list of custom queries configmaps - oldCluster := cluster.DeepCopy() - cluster.Spec.Monitoring.CustomQueriesConfigMap = append(cluster.Spec.Monitoring.CustomQueriesConfigMap[:oldCmID], - cluster.Spec.Monitoring.CustomQueriesConfigMap[oldCmID+1:]...) - err = r.Patch(ctx, cluster, client.MergeFrom(oldCluster)) - if err != nil { - log.Warning("had an error while removing the old custom monitoring queries configmap from "+ - "the monitoring section in the cluster", - "err", err, - "configmap", configuration.Current.MonitoringQueriesConfigmap) - } -} diff --git a/internal/controller/cluster_controller_test.go b/internal/controller/cluster_controller_test.go index ef62079db2..24f567d814 100644 --- a/internal/controller/cluster_controller_test.go +++ b/internal/controller/cluster_controller_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller import ( - "context" "time" cnpgTypes "github.com/cloudnative-pg/machinery/pkg/types" @@ -27,6 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" @@ -73,8 +76,7 @@ var _ = Describe("Updating target primary", func() { env = buildTestEnvironment() }) - It("selects the new target primary right away", func() { - ctx := context.TODO() + It("selects the new target primary right away", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace) @@ -132,8 +134,7 @@ var _ = Describe("Updating target primary", func() { }) }) - It("it should wait the failover delay to select the new target primary", func() { - ctx := context.TODO() + It("it should wait the failover delay to select the new target primary", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace, func(cluster *apiv1.Cluster) { cluster.Spec.FailoverDelay = 2 @@ -210,8 +211,7 @@ var _ = Describe("Updating target primary", func() { }) }) - It("Issue #1783: ensure that the scale-down behaviour remain consistent", func() { - ctx := context.TODO() + It("Issue #1783: ensure that the scale-down behaviour remain consistent", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace, func(cluster *apiv1.Cluster) { cluster.Spec.Instances = 2 @@ -266,7 +266,7 @@ var _ = Describe("Updating target primary", func() { By("checking that the third instance exists even if the cluster has two instances", func() { var expectedPod corev1.Pod instanceName := specs.GetInstanceName(cluster.Name, 3) - err := env.clusterReconciler.Client.Get(ctx, types.NamespacedName{ + err := env.clusterReconciler.Get(ctx, types.NamespacedName{ Name: instanceName, Namespace: cluster.Namespace, }, &expectedPod) @@ -274,3 +274,43 @@ var _ = Describe("Updating target primary", func() { }) }) }) + +var _ = Describe("isNodeUnschedulableOrBeingDrained", func() { + node := &corev1.Node{} + nodeUnschedulable := &corev1.Node{ + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + } + nodeTainted := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "karpenter.sh/disrupted", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + } + nodeWithUnknownTaint := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "unknown.io/taint", + Effect: corev1.TaintEffectPreferNoSchedule, + }, + }, + }, + } + + DescribeTable( + "it detects nodes that are unschedulable or being drained", + func(node *corev1.Node, expected bool) { + Expect(isNodeUnschedulableOrBeingDrained(node, configuration.DefaultDrainTaints)).To(Equal(expected)) + }, + Entry("plain node", node, false), + Entry("node is unschedulable", 
nodeUnschedulable, true), + Entry("node is tainted", nodeTainted, true), + Entry("node has an unknown taint", nodeWithUnknownTaint, false), + ) +}) diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 239cff3e0e..c7f1bb53a7 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -23,6 +26,7 @@ import ( "slices" "time" + "github.com/cloudnative-pg/machinery/pkg/log" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/sethvargo/go-password/password" batchv1 "k8s.io/api/batch/v1" @@ -46,7 +50,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" - "github.com/cloudnative-pg/machinery/pkg/log" ) // createPostgresClusterObjects ensures that we have the required global objects @@ -98,11 +101,10 @@ func (r *ClusterReconciler) createPostgresClusterObjects(ctx context.Context, cl return err } - // TODO: only required to cleanup custom monitoring queries configmaps from older versions (v1.10 and v1.11) - // that could have been copied with the source configmap name instead of the new default one. - // Should be removed in future releases. 
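Stepping back to the `isNodeUnschedulableOrBeingDrained` table test just above: the predicate itself is not shown in this diff, but its contract is clear from the entries. A speculative sketch of the logic, with `defaultDrainTaints` standing in for `configuration.DefaultDrainTaints` (the exact default taint keys are the operator's to define):

```go
package main

import (
	"fmt"
	"slices"

	corev1 "k8s.io/api/core/v1"
)

// defaultDrainTaints lists taint keys that node-drain tooling applies;
// the karpenter.sh/disrupted entry mirrors the test above, but this
// slice is an assumption, not the operator's actual default.
var defaultDrainTaints = []string{
	"node.kubernetes.io/unschedulable",
	"karpenter.sh/disrupted",
}

// nodeUnschedulableOrBeingDrained sketches the predicate the test
// exercises: a node counts as draining when it is cordoned or carries
// one of the configured drain taints. Unknown taints are ignored.
func nodeUnschedulableOrBeingDrained(node *corev1.Node, drainTaints []string) bool {
	if node.Spec.Unschedulable {
		return true
	}
	for _, taint := range node.Spec.Taints {
		if slices.Contains(drainTaints, taint.Key) {
			return true
		}
	}
	return false
}

func main() {
	tainted := &corev1.Node{
		Spec: corev1.NodeSpec{
			Taints: []corev1.Taint{{
				Key:    "karpenter.sh/disrupted",
				Effect: corev1.TaintEffectNoSchedule,
			}},
		},
	}
	fmt.Println(nodeUnschedulableOrBeingDrained(tainted, defaultDrainTaints)) // true
}
```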
- // should never return an error, not a requirement, just a nice to have - r.deleteOldCustomQueriesConfigmap(ctx, cluster) + err = r.reconcileFailoverQuorumObject(ctx, cluster) + if err != nil { + return err + } return nil } @@ -182,7 +184,8 @@ func (r *ClusterReconciler) reconcileSuperuserSecret(ctx context.Context, cluste cluster.GetServiceReadWriteName(), "*", "postgres", - postgresPassword) + postgresPassword, + utils.UserTypeSuperuser) cluster.SetInheritedDataAndOwnership(&postgresSecret.ObjectMeta) return createOrPatchClusterCredentialSecret(ctx, r.Client, postgresSecret) @@ -222,7 +225,8 @@ func (r *ClusterReconciler) reconcileAppUserSecret(ctx context.Context, cluster cluster.GetServiceReadWriteName(), cluster.GetApplicationDatabaseName(), cluster.GetApplicationDatabaseOwner(), - appPassword) + appPassword, + utils.UserTypeApp) cluster.SetInheritedDataAndOwnership(&appSecret.ObjectMeta) return createOrPatchClusterCredentialSecret(ctx, r.Client, appSecret) @@ -344,7 +348,7 @@ func (r *ClusterReconciler) reconcileManagedServices(ctx context.Context, cluste // we delete the old managed services not appearing anymore in the spec var livingServices corev1.ServiceList - if err := r.Client.List(ctx, &livingServices, client.InNamespace(cluster.Namespace), client.MatchingLabels{ + if err := r.List(ctx, &livingServices, client.InNamespace(cluster.Namespace), client.MatchingLabels{ utils.IsManagedLabelName: "true", utils.ClusterLabelName: cluster.Name, }); err != nil { @@ -390,13 +394,13 @@ func (r *ClusterReconciler) serviceReconciler( ) var livingService corev1.Service - err := r.Client.Get(ctx, types.NamespacedName{Name: proposed.Name, Namespace: proposed.Namespace}, &livingService) + err := r.Get(ctx, types.NamespacedName{Name: proposed.Name, Namespace: proposed.Namespace}, &livingService) if apierrs.IsNotFound(err) { if !enabled { return nil } contextLogger.Info("creating service") - return r.Client.Create(ctx, proposed) + return r.Create(ctx, proposed) } if err != nil { return err @@ -413,7 +417,7 @@ func (r *ClusterReconciler) serviceReconciler( if !enabled { contextLogger.Info("deleting service, due to not being managed anymore") - return r.Client.Delete(ctx, &livingService) + return r.Delete(ctx, &livingService) } var shouldUpdate bool @@ -449,11 +453,11 @@ func (r *ClusterReconciler) serviceReconciler( if strategy == apiv1.ServiceUpdateStrategyPatch { contextLogger.Info("reconciling service") // we update to ensure that we substitute the selectors - return r.Client.Update(ctx, &livingService) + return r.Update(ctx, &livingService) } contextLogger.Info("deleting the service") - if err := r.Client.Delete(ctx, &livingService); err != nil { + if err := r.Delete(ctx, &livingService); err != nil { return err } @@ -1068,6 +1072,14 @@ func (r *ClusterReconciler) createPrimaryInstance( // reconciliation loop is started by the informers. contextLogger.Info("refusing to create the primary instance while the latest generated serial is not zero", "latestGeneratedNode", cluster.Status.LatestGeneratedNode) + + if err := r.RegisterPhase(ctx, cluster, + apiv1.PhaseUnrecoverable, + "One or more instances were previously created, but no PersistentVolumeClaims (PVCs) exist. "+ + "The cluster is in an unrecoverable state. 
To resolve this, restore the cluster from a recent backup.", + ); err != nil { + return ctrl.Result{}, fmt.Errorf("while registering the unrecoverable phase: %w", err) + } return ctrl.Result{}, nil } @@ -1109,7 +1121,7 @@ func (r *ClusterReconciler) createPrimaryInstance( recoverySnapshot, nodeSerial, ); err != nil { - return ctrl.Result{RequeueAfter: time.Minute}, err + return ctrl.Result{}, fmt.Errorf("cannot create primary instance PVCs: %w", err) } // We are bootstrapping a cluster and in need to create the first node @@ -1162,10 +1174,9 @@ func (r *ClusterReconciler) createPrimaryInstance( } contextLogger.Info("Creating new Job", - "name", job.Name, + "jobName", job.Name, "primary", true) - utils.SetOperatorVersion(&job.ObjectMeta, versions.Version) utils.InheritAnnotations(&job.ObjectMeta, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) utils.InheritAnnotations(&job.Spec.Template.ObjectMeta, cluster.Annotations, @@ -1226,7 +1237,7 @@ func (r *ClusterReconciler) joinReplicaInstance( var backupList apiv1.BackupList if err := r.List(ctx, &backupList, - client.MatchingFields{clusterName: cluster.Name}, + client.MatchingFields{clusterNameField: cluster.Name}, client.InNamespace(cluster.Namespace), ); err != nil { contextLogger.Error(err, "Error while getting backup list, when bootstrapping a new replica") @@ -1245,7 +1256,7 @@ func (r *ClusterReconciler) joinReplicaInstance( "job", job.Name, "primary", false, "storageSource", storageSource, - "role", job.Spec.Template.ObjectMeta.Labels[utils.JobRoleLabelName], + "role", job.Spec.Template.Labels[utils.JobRoleLabelName], ) r.Recorder.Eventf(cluster, "Normal", "CreatingInstance", @@ -1262,7 +1273,6 @@ func (r *ClusterReconciler) joinReplicaInstance( return ctrl.Result{}, err } - utils.SetOperatorVersion(&job.ObjectMeta, versions.Version) utils.InheritAnnotations(&job.ObjectMeta, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) utils.InheritAnnotations(&job.Spec.Template.ObjectMeta, cluster.Annotations, @@ -1290,7 +1300,7 @@ func (r *ClusterReconciler) joinReplicaInstance( storageSource, nodeSerial, ); err != nil { - return ctrl.Result{RequeueAfter: time.Minute}, err + return ctrl.Result{}, fmt.Errorf("cannot create replica instance PVCs: %w", err) } return ctrl.Result{RequeueAfter: 30 * time.Second}, ErrNextLoop @@ -1305,7 +1315,7 @@ func (r *ClusterReconciler) ensureInstancesAreCreated( ) (ctrl.Result, error) { contextLogger := log.FromContext(ctx) - instanceToCreate, err := findInstancePodToCreate(cluster, instancesStatus, resources.pvcs.Items) + instanceToCreate, err := findInstancePodToCreate(ctx, cluster, instancesStatus, resources.pvcs.Items) if err != nil { return ctrl.Result{}, err } @@ -1340,19 +1350,6 @@ func (r *ClusterReconciler) ensureInstancesAreCreated( ) return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop } - - if configuration.Current.EnableAzurePVCUpdates { - for _, resizingPVC := range cluster.Status.ResizingPVC { - // if the pvc is in resizing state we requeue and wait - if resizingPVC == instancePVC.Name { - contextLogger.Info( - "PVC is in resizing status, retrying in 5 seconds", - "instance", instanceToCreate.Name, - ) - return ctrl.Result{RequeueAfter: 5 * time.Second}, ErrNextLoop - } - } - } } // If this cluster has been restarted, mark the Pod with the latest restart time @@ -1371,7 +1368,6 @@ func (r *ClusterReconciler) ensureInstancesAreCreated( return ctrl.Result{}, fmt.Errorf("unable to set the owner reference for the Pod: %w", 
err) } - utils.SetOperatorVersion(&instanceToCreate.ObjectMeta, versions.Version) utils.InheritAnnotations(&instanceToCreate.ObjectMeta, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) utils.InheritLabels(&instanceToCreate.ObjectMeta, cluster.Labels, @@ -1393,6 +1389,7 @@ func (r *ClusterReconciler) ensureInstancesAreCreated( // we elect a current instance that doesn't exist for creation func findInstancePodToCreate( + ctx context.Context, cluster *apiv1.Cluster, instancesStatus postgres.PostgresqlStatusList, pvcs []corev1.PersistentVolumeClaim, @@ -1439,7 +1436,7 @@ func findInstancePodToCreate( if err != nil { return nil, err } - return specs.PodWithExistingStorage(*cluster, serial), nil + return specs.NewInstance(ctx, *cluster, serial, true) } return nil, nil diff --git a/internal/controller/cluster_create_test.go b/internal/controller/cluster_create_test.go index c7068a15d7..9db569a615 100644 --- a/internal/controller/cluster_create_test.go +++ b/internal/controller/cluster_create_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -182,13 +185,13 @@ var _ = Describe("cluster_create unit tests", func() { svc.Spec.Selector = map[string]string{ "outdated": "selector", } - err := env.clusterReconciler.Client.Create(ctx, svc) + err := env.clusterReconciler.Create(ctx, svc) Expect(err).ToNot(HaveOccurred()) } checkService := func(before *corev1.Service, expectedLabels map[string]string) { var afterChangesService corev1.Service - err := env.clusterReconciler.Client.Get(ctx, types.NamespacedName{ + err := env.clusterReconciler.Get(ctx, types.NamespacedName{ Name: before.Name, Namespace: before.Namespace, }, &afterChangesService) @@ -306,7 +309,7 @@ var _ = Describe("cluster_create unit tests", func() { By("executing createOrPatchServiceAccount (patch)", func() { By("setting owner reference to nil", func() { - sa.ObjectMeta.OwnerReferences = nil + sa.OwnerReferences = nil err := env.client.Update(context.Background(), sa) Expect(err).ToNot(HaveOccurred()) }) @@ -822,14 +825,12 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { namespace = "test-namespace" ) var ( - ctx context.Context proposed *corev1.Secret cli k8client.Client ) BeforeEach(func() { cli = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build() - ctx = context.TODO() const secretName = "test-secret" proposed = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -843,7 +844,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret does not exist", func() { - It("should create the secret", func() { + It("should create the secret", func(ctx SpecContext) { err := createOrPatchClusterCredentialSecret(ctx, cli, proposed) Expect(err).NotTo(HaveOccurred()) @@ -857,7 +858,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret exists and is owned by the cluster", func() { - BeforeEach(func() { + BeforeEach(func(ctx 
SpecContext) { existingSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, @@ -870,7 +871,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { cluster := apiv1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: apiv1.ClusterKind, - APIVersion: apiGVString, + APIVersion: apiSGVString, }, ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: namespace}, } @@ -878,7 +879,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(cli.Create(ctx, existingSecret)).To(Succeed()) }) - It("should patch the secret if metadata differs", func() { + It("should patch the secret if metadata differs", func(ctx SpecContext) { Expect(proposed.Labels).To(HaveKeyWithValue("test", "label")) Expect(proposed.Annotations).To(HaveKeyWithValue("test", "annotation")) @@ -892,15 +893,15 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(patchedSecret.Annotations).To(HaveKeyWithValue("test", "annotation")) }) - It("should not patch the secret if metadata is the same", func() { + It("should not patch the secret if metadata is the same", func(ctx SpecContext) { var originalSecret corev1.Secret err := cli.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, &originalSecret) Expect(err).NotTo(HaveOccurred()) // Assuming secretName is the name of the existing secret - proposed.ObjectMeta.Name = secretName - proposed.ObjectMeta.Labels = map[string]string{"old": "label"} - proposed.ObjectMeta.Annotations = map[string]string{"old": "annotation"} + proposed.Name = secretName + proposed.Labels = map[string]string{"old": "label"} + proposed.Annotations = map[string]string{"old": "annotation"} err = createOrPatchClusterCredentialSecret(ctx, cli, proposed) Expect(err).NotTo(HaveOccurred()) @@ -913,7 +914,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret exists but is not owned by the cluster", func() { - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { existingSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, @@ -923,7 +924,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(cli.Create(ctx, existingSecret)).To(Succeed()) }) - It("should not modify the secret", func() { + It("should not modify the secret", func(ctx SpecContext) { var originalSecret corev1.Secret err := cli.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, &originalSecret) Expect(err).NotTo(HaveOccurred()) @@ -1006,7 +1007,7 @@ var _ = Describe("createOrPatchOwnedPodDisruptionBudget", func() { }) It("should update the existing PodDisruptionBudget if the metadata is different", func() { - pdb.ObjectMeta.Labels["newlabel"] = "newvalue" + pdb.Labels["newlabel"] = "newvalue" err = reconciler.createOrPatchOwnedPodDisruptionBudget(ctx, cluster, pdb) Expect(err).ShouldNot(HaveOccurred()) @@ -1161,7 +1162,7 @@ var _ = Describe("Service Reconciling", func() { cluster = apiv1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: apiv1.ClusterKind, - APIVersion: apiv1.GroupVersion.String(), + APIVersion: apiv1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", @@ -1294,19 +1295,19 @@ var _ = Describe("Service Reconciling", func() { It("should create the default services", func() { err := reconciler.reconcilePostgresServices(ctx, &cluster) Expect(err).NotTo(HaveOccurred()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadWriteName(), Namespace: cluster.Namespace}, 
&corev1.Service{}, ) Expect(err).ToNot(HaveOccurred()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadName(), Namespace: cluster.Namespace}, &corev1.Service{}, ) Expect(err).ToNot(HaveOccurred()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadOnlyName(), Namespace: cluster.Namespace}, &corev1.Service{}, @@ -1322,19 +1323,19 @@ var _ = Describe("Service Reconciling", func() { } err := reconciler.reconcilePostgresServices(ctx, &cluster) Expect(err).NotTo(HaveOccurred()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadWriteName(), Namespace: cluster.Namespace}, &corev1.Service{}, ) Expect(apierrs.IsNotFound(err)).To(BeTrue()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadName(), Namespace: cluster.Namespace}, &corev1.Service{}, ) Expect(apierrs.IsNotFound(err)).To(BeTrue()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadOnlyName(), Namespace: cluster.Namespace}, &corev1.Service{}, diff --git a/internal/controller/cluster_delete.go b/internal/controller/cluster_delete.go index cd775442e7..44bf9945fe 100644 --- a/internal/controller/cluster_delete.go +++ b/internal/controller/cluster_delete.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_delete_test.go b/internal/controller/cluster_delete_test.go index 5a1d6be4a3..cb42c79dd0 100644 --- a/internal/controller/cluster_delete_test.go +++ b/internal/controller/cluster_delete_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -113,7 +116,7 @@ var _ = Describe("ensures that deleteDanglingMonitoringQueries works correctly", cluster.Spec.Monitoring = &apiv1.MonitoringConfiguration{ DisableDefaultQueries: ptr.To(false), } - err := crReconciler.Client.Update(context.Background(), cluster) + err := crReconciler.Update(context.Background(), cluster) Expect(err).ToNot(HaveOccurred()) }) diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index 95ac9ad668..21cbba1dae 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -1,17 +1,20 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -20,7 +23,9 @@ import ( "context" "fmt" + "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -31,32 +36,98 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" ) -// reconcileImage sets the image inside the status, to be used by the following -// functions of the reconciler loop +// reconcileImage processes the image request, executes it, and stores +// the result in the .status.image field. If the user requested a +// major version upgrade, the current image is saved in the +// .status.pgDataImageInfo field. func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.Cluster) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx) - oldCluster := cluster.DeepCopy() + requestedImageInfo, err := r.getRequestedImageInfo(ctx, cluster) + if err != nil { + return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, err.Error()) + } - // If ImageName is defined and different from the current image in the status, we update the status - if cluster.Spec.ImageName != "" && cluster.Status.Image != cluster.Spec.ImageName { - cluster.Status.Image = cluster.Spec.ImageName - if err := r.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)); err != nil { - contextLogger.Error( - err, - "While patching cluster status to set the image name from the cluster Spec", - "imageName", cluster.Status.Image, - ) - return nil, err - } + // Case 1: the cluster is being initialized and there is still no + // running image. In this case, we should simply apply the image selected by the user. 
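An aside before the three cases unfold in the hunk: the decision can be tried out standalone. The sketch assumes images are tagged `major.minor` and hand-rolls the parsing that the real code delegates to the machinery `reference` and `version` packages; `majorFromImage` is illustrative only and ignores registries with ports or digest references:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// majorFromImage is a simplified take on getImageInfoFromImage: it
// pulls the tag out of an image reference and reads the leading number
// as the PostgreSQL major version.
func majorFromImage(image string) (int, error) {
	_, tag, ok := strings.Cut(image, ":")
	if !ok {
		return 0, fmt.Errorf("image %q has no tag", image)
	}
	majorText, _, _ := strings.Cut(tag, ".")
	major, err := strconv.Atoi(majorText)
	if err != nil {
		return 0, fmt.Errorf("cannot parse major version from tag %q: %w", tag, err)
	}
	return major, nil
}

func main() {
	current, _ := majorFromImage("postgres:16.2")
	requested, _ := majorFromImage("postgres:17.2")

	// The same three-way decision reconcileImage encodes:
	switch {
	case requested < current:
		fmt.Println("downgrade requested: refuse with an error")
	case requested > current:
		fmt.Println("major upgrade requested: update .status.image only")
	default:
		fmt.Println("same major: minor change, update image and pgDataImageInfo")
	}
}
```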
+ if cluster.Status.PGDataImageInfo == nil { + return nil, status.PatchWithOptimisticLock( + ctx, + r.Client, + cluster, + status.SetImage(requestedImageInfo.Image), + status.SetPGDataImageInfo(&requestedImageInfo), + ) + } + + // Case 2: there's a running image. The code checks if the user selected + // an image of the same major version or if a change in the major + // version has been requested. + if requestedImageInfo.Image == cluster.Status.PGDataImageInfo.Image { + // The requested image is the same as the current one, no action needed return nil, nil } - // If ImageName was defined, we rely on what the user requested + currentMajorVersion := cluster.Status.PGDataImageInfo.MajorVersion + requestedMajorVersion := requestedImageInfo.MajorVersion + + if currentMajorVersion > requestedMajorVersion { + // Major version downgrade requested. This is not allowed. + contextLogger.Info( + "Cannot downgrade the PostgreSQL major version. Keeping the current image.", + "currentImage", cluster.Status.PGDataImageInfo.Image, + "requestedImage", requestedImageInfo) + return nil, fmt.Errorf("cannot downgrade the PostgreSQL major version from %d to %d", + currentMajorVersion, requestedMajorVersion) + } + + if currentMajorVersion < requestedMajorVersion { + // Major version upgrade requested + return nil, status.PatchWithOptimisticLock( + ctx, + r.Client, + cluster, + status.SetImage(requestedImageInfo.Image), + ) + } + + // The major versions are the same, but the images are different. + // This is a minor version upgrade/downgrade. + return nil, status.PatchWithOptimisticLock( + ctx, + r.Client, + cluster, + status.SetImage(requestedImageInfo.Image), + status.SetPGDataImageInfo(&requestedImageInfo)) +} + +func getImageInfoFromImage(image string) (apiv1.ImageInfo, error) { + // Parse the version from the tag + imageVersion, err := version.FromTag(reference.New(image).Tag) + if err != nil { + return apiv1.ImageInfo{}, fmt.Errorf("cannot parse version from image %s: %w", image, err) + } + + return apiv1.ImageInfo{ + Image: image, + MajorVersion: int(imageVersion.Major()), //nolint:gosec + }, nil +} + +func (r *ClusterReconciler) getRequestedImageInfo( + ctx context.Context, cluster *apiv1.Cluster, +) (apiv1.ImageInfo, error) { + contextLogger := log.FromContext(ctx) + if cluster.Spec.ImageCatalogRef == nil { - return nil, nil + if cluster.Spec.ImageName != "" { + return getImageInfoFromImage(cluster.Spec.ImageName) + } + + return apiv1.ImageInfo{}, fmt.Errorf("ImageName is not defined and no catalog is referenced") } contextLogger = contextLogger.WithValues("catalogRef", cluster.Spec.ImageCatalogRef) @@ -71,28 +142,27 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C catalog = &apiv1.ImageCatalog{} default: contextLogger.Info("Unknown catalog kind") - return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, - "Invalid image catalog type") + return apiv1.ImageInfo{}, fmt.Errorf("invalid image catalog type") } apiGroup := cluster.Spec.ImageCatalogRef.APIGroup - if apiGroup == nil || *apiGroup != apiv1.GroupVersion.Group { + if apiGroup == nil || *apiGroup != apiv1.SchemeGroupVersion.Group { contextLogger.Info("Unknown catalog group") - return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, - "Invalid image catalog group") + return apiv1.ImageInfo{}, fmt.Errorf("invalid image catalog group") } // Get the referenced catalog catalogName := cluster.Spec.ImageCatalogRef.Name - err := r.Client.Get(ctx, 
types.NamespacedName{Namespace: cluster.Namespace, Name: catalogName}, catalog) + err := r.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: catalogName}, catalog) if err != nil { if apierrs.IsNotFound(err) { r.Recorder.Eventf(cluster, "Warning", "DiscoverImage", "Cannot get %v/%v", catalogKind, catalogName) - return &ctrl.Result{}, nil + contextLogger.Info("catalog not found", "catalogKind", catalogKind, "catalogName", catalogName) + return apiv1.ImageInfo{}, fmt.Errorf("catalog %s/%s not found", catalogKind, catalogName) } - return nil, err + return apiv1.ImageInfo{}, err } // Catalog found, we try to find the image for the major version @@ -108,25 +178,10 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C catalogName) contextLogger.Info("cannot find requested major version", "requestedMajorVersion", requestedMajorVersion) - return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, - "Selected major version is not available in the catalog") - } - - // If the image is different, we set it into the cluster status - if cluster.Spec.ImageName != catalogImage { - cluster.Status.Image = catalogImage - patch := client.MergeFrom(oldCluster) - if err := r.Status().Patch(ctx, cluster, patch); err != nil { - patchBytes, _ := patch.Data(cluster) - contextLogger.Error( - err, - "While patching cluster status to set the image name from the catalog", - "patch", string(patchBytes)) - return nil, err - } + return apiv1.ImageInfo{}, fmt.Errorf("selected major version is not available in the catalog") } - return nil, nil + return apiv1.ImageInfo{Image: catalogImage, MajorVersion: requestedMajorVersion}, nil } func (r *ClusterReconciler) getClustersForImageCatalogsToClustersMapper( @@ -139,7 +194,7 @@ func (r *ClusterReconciler) getClustersForImageCatalogsToClustersMapper( } listOps := &client.ListOptions{ - FieldSelector: fields.OneTermEqualSelector(".spec.imageCatalog.name", object.GetName()), + FieldSelector: fields.OneTermEqualSelector(imageCatalogKey, object.GetName()), Namespace: object.GetNamespace(), } @@ -186,7 +241,7 @@ func (r *ClusterReconciler) getClustersForClusterImageCatalogsToClustersMapper( } listOps := &client.ListOptions{ - FieldSelector: fields.OneTermEqualSelector(".spec.imageCatalog.name", object.GetName()), + FieldSelector: fields.OneTermEqualSelector(imageCatalogKey, object.GetName()), } err = r.List(ctx, &clusters, listOps) diff --git a/internal/controller/cluster_image_test.go b/internal/controller/cluster_image_test.go new file mode 100644 index 0000000000..11123505b2 --- /dev/null +++ b/internal/controller/cluster_image_test.go @@ -0,0 +1,225 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func newFakeReconcilerFor(cluster *apiv1.Cluster, catalog *apiv1.ImageCatalog) *ClusterReconciler { + fakeClient := fake.NewClientBuilder(). + WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithRuntimeObjects(cluster). + WithStatusSubresource(cluster). + Build() + + if catalog != nil { + _ = fakeClient.Create(context.Background(), catalog) + } + + return &ClusterReconciler{ + Client: fakeClient, + Recorder: record.NewFakeRecorder(10), + } +} + +var _ = Describe("Cluster image detection", func() { + It("gets the image from .spec.imageName", func(ctx SpecContext) { + // This is a simple situation: a cluster with an + // explicit image. The image should be set directly into the + // status, and the reconciliation loop can proceed. + // No major version upgrade has been requested. + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:15.2", + }, + } + r := newFakeReconcilerFor(cluster, nil) + + result, err := r.reconcileImage(ctx, cluster) + Expect(err).Error().ShouldNot(HaveOccurred()) + Expect(result).To(BeNil()) + + Expect(cluster.Status.Image).To(Equal("postgres:15.2")) + Expect(cluster.Status.PGDataImageInfo.Image).To(Equal("postgres:15.2")) + Expect(cluster.Status.PGDataImageInfo.MajorVersion).To(Equal(15)) + }) + + It("gets the image from an image catalog", func(ctx SpecContext) { + // This is slightly more complex: the cluster uses an image catalog reference + // instead of an explicit image name. No major version upgrade has + // been requested, so the reconciliation loop can proceed correctly + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "catalog", + Kind: "ImageCatalog", + APIGroup: &apiv1.SchemeGroupVersion.Group, + }, + Major: 15, + }, + }, + } + catalog := &apiv1.ImageCatalog{ + ObjectMeta: metav1.ObjectMeta{ + Name: "catalog", + Namespace: "default", + }, + Spec: apiv1.ImageCatalogSpec{ + Images: []apiv1.CatalogImage{ + { + Image: "postgres:15.2", + Major: 15, + }, + }, + }, + } + + r := newFakeReconcilerFor(cluster, catalog) + result, err := r.reconcileImage(ctx, cluster) + Expect(err).Error().ShouldNot(HaveOccurred()) + Expect(result).To(BeNil()) + + Expect(cluster.Status.Image).To(Equal("postgres:15.2")) + Expect(cluster.Status.PGDataImageInfo.Image).To(Equal("postgres:15.2")) + Expect(cluster.Status.PGDataImageInfo.MajorVersion).To(Equal(15)) + }) + + It("gets the name from the image catalog, but the catalog is incomplete", func(ctx SpecContext) { + // As a variant of the previous case, the catalog may be + // incomplete and have no image for the selected major. When + // this happens, the reconciliation loop should be stopped and + // the proper phase should be set. 
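An aside before the test body: this scenario relies on the catalog lookup failing when the requested major is absent, which the reconciler then converts into the `PhaseImageCatalogError` phase. A self-contained sketch of such a lookup, with hypothetical names shaped after `apiv1.CatalogImage`:

```go
package main

import "fmt"

// catalogImage pairs an image with its PostgreSQL major version,
// shaped after apiv1.CatalogImage; the type is illustrative only.
type catalogImage struct {
	Image string
	Major int
}

// findImageForMajor sketches the catalog lookup: it returns the image
// registered for the requested major, or an error that the reconciler
// can turn into the ImageCatalogError phase.
func findImageForMajor(images []catalogImage, major int) (string, error) {
	for _, img := range images {
		if img.Major == major {
			return img.Image, nil
		}
	}
	return "", fmt.Errorf("selected major version %d is not available in the catalog", major)
}

func main() {
	catalog := []catalogImage{{Image: "postgres:17.4", Major: 17}}

	if _, err := findImageForMajor(catalog, 15); err != nil {
		// The reconciler reacts by registering the error phase
		// and stopping the loop, as the test below asserts.
		fmt.Println("catalog lookup failed:", err)
	}
}
```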
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cluster-example",
+				Namespace: "default",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageCatalogRef: &apiv1.ImageCatalogRef{
+					TypedLocalObjectReference: corev1.TypedLocalObjectReference{
+						Name:     "catalog",
+						Kind:     "ImageCatalog",
+						APIGroup: &apiv1.SchemeGroupVersion.Group,
+					},
+					Major: 15,
+				},
+			},
+		}
+		catalog := &apiv1.ImageCatalog{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "catalog",
+				Namespace: "default",
+			},
+			Spec: apiv1.ImageCatalogSpec{
+				Images: []apiv1.CatalogImage{
+					{
+						Image: "postgres:17.4",
+						Major: 17,
+					},
+				},
+			},
+		}
+
+		r := newFakeReconcilerFor(cluster, catalog)
+		result, err := r.reconcileImage(ctx, cluster)
+		Expect(err).Error().ShouldNot(HaveOccurred())
+		Expect(result).ToNot(BeNil())
+
+		Expect(cluster.Status.Phase).To(Equal(apiv1.PhaseImageCatalogError))
+	})
+
+	It("skips major version downgrades", func(ctx SpecContext) {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cluster-example",
+				Namespace: "default",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageName: "postgres:15.2",
+			},
+			Status: apiv1.ClusterStatus{
+				Image: "postgres:16.2",
+				PGDataImageInfo: &apiv1.ImageInfo{
+					Image:        "postgres:16.2",
+					MajorVersion: 16,
+				},
+			},
+		}
+
+		r := newFakeReconcilerFor(cluster, nil)
+
+		result, err := r.reconcileImage(ctx, cluster)
+		Expect(err).Error().Should(HaveOccurred())
+		Expect(err).Error().Should(MatchError("cannot downgrade the PostgreSQL major version from 16 to 15"))
+		Expect(result).To(BeNil())
+
+		Expect(cluster.Status.Image).To(Equal("postgres:16.2"))
+	})
+
+	It("processes major version upgrades", func(ctx SpecContext) {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cluster-example",
+				Namespace: "default",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageName: "postgres:17.2",
+			},
+			Status: apiv1.ClusterStatus{
+				Image: "postgres:16.2",
+				PGDataImageInfo: &apiv1.ImageInfo{
+					Image:        "postgres:16.2",
+					MajorVersion: 16,
+				},
+			},
+		}
+
+		r := newFakeReconcilerFor(cluster, nil)
+
+		result, err := r.reconcileImage(ctx, cluster)
+		Expect(err).Error().ShouldNot(HaveOccurred())
+		Expect(result).To(BeNil())
+
+		Expect(cluster.Status.Image).To(Equal("postgres:17.2"))
+		Expect(cluster.Status.PGDataImageInfo.Image).To(Equal("postgres:16.2"))
+		Expect(cluster.Status.PGDataImageInfo.MajorVersion).To(Equal(16))
+	})
+})
diff --git a/internal/controller/cluster_pki.go b/internal/controller/cluster_pki.go
index ea79ff06a5..ab8575cd1f 100644
--- a/internal/controller/cluster_pki.go
+++ b/internal/controller/cluster_pki.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -43,23 +46,7 @@ func (r *ClusterReconciler) setupPostgresPKI(ctx context.Context, cluster *apiv1 return fmt.Errorf("generating server CA certificate: %w", err) } - // This is the certificate for the server - serverCertificateName := client.ObjectKey{Namespace: cluster.GetNamespace(), Name: cluster.GetServerTLSSecretName()} - opts := x509.VerifyOptions{KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}} - err = r.ensureServerLeafCertificate( - ctx, - cluster, - serverCertificateName, - cluster.GetServiceReadWriteName(), - serverCaSecret, - certs.CertTypeServer, - cluster.GetClusterAltDNSNames(), - &opts) - if err != nil { - if apierrors.IsNotFound(err) { - return fmt.Errorf("missing specified server TLS secret %s: %w", - cluster.Status.Certificates.ServerTLSSecret, err) - } + if err = r.ensureServerLeafCertificate(ctx, cluster, serverCaSecret); err != nil { return fmt.Errorf("generating server TLS certificate: %w", err) } @@ -71,20 +58,7 @@ func (r *ClusterReconciler) setupPostgresPKI(ctx context.Context, cluster *apiv1 return fmt.Errorf("generating client CA certificate: %w", err) } - // Generating postgres client certificate - replicationSecretName := client.ObjectKey{ - Namespace: cluster.GetNamespace(), - Name: cluster.GetReplicationSecretName(), - } - err = r.ensureReplicationClientLeafCertificate( - ctx, - cluster, - replicationSecretName, - apiv1.StreamingReplicationUser, - clientCaSecret, - certs.CertTypeClient, - nil, - &x509.VerifyOptions{KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}}) + err = r.ensureReplicationClientLeafCertificate(ctx, cluster, clientCaSecret) if err != nil { if apierrors.IsNotFound(err) { return fmt.Errorf("missing specified streaming replication client TLS secret %s: %w", @@ -112,7 +86,7 @@ func (r *ClusterReconciler) ensureClientCASecret(ctx context.Context, cluster *a return nil, err } - err = r.verifyCAValidity(secret, cluster) + err = r.verifyCAValidity(ctx, secret, cluster) if err != nil { return nil, err } @@ -149,7 +123,7 @@ func (r *ClusterReconciler) ensureServerCASecret(ctx context.Context, cluster *a return nil, err } - err = r.verifyCAValidity(secret, cluster) + err = r.verifyCAValidity(ctx, secret, cluster) if err != nil { return nil, err } @@ -168,7 +142,9 @@ func (r *ClusterReconciler) ensureServerCASecret(ctx context.Context, cluster *a return &secret, nil } -func (r *ClusterReconciler) verifyCAValidity(secret v1.Secret, cluster *apiv1.Cluster) error { +func (r *ClusterReconciler) verifyCAValidity(ctx context.Context, secret v1.Secret, cluster *apiv1.Cluster) error { + contextLogger := log.FromContext(ctx) + // Verify validity of the CA and expiration (only ca.crt) publicKey, ok := secret.Data[certs.CACertKey] if !ok { @@ -185,7 +161,7 @@ func (r *ClusterReconciler) verifyCAValidity(secret v1.Secret, cluster *apiv1.Cl } else if isExpiring { r.Recorder.Event(cluster, "Warning", "SecretIsExpiring", "Checking expiring date of secret "+secret.Name) - log.Info("CA certificate is expiring or is already expired", "secret", secret.Name) + contextLogger.Info("CA certificate is expiring or is already expired", "secret", secret.Name) } return nil @@ -253,24 +229,34 @@ func (r *ClusterReconciler) renewCASecret(ctx context.Context, secret *v1.Secret func (r *ClusterReconciler) ensureServerLeafCertificate( ctx context.Context, cluster *apiv1.Cluster, - secretName client.ObjectKey, - commonName string, caSecret *v1.Secret, - usage certs.CertType, - altDNSNames []string, - opts 
*x509.VerifyOptions, ) error { + // This is the certificate for the server + secretName := client.ObjectKey{Namespace: cluster.GetNamespace(), Name: cluster.GetServerTLSSecretName()} + // If not specified generate/renew if cluster.Spec.Certificates == nil || cluster.Spec.Certificates.ServerTLSSecret == "" { - return r.ensureLeafCertificate(ctx, cluster, secretName, commonName, caSecret, usage, altDNSNames, nil) + return r.ensureLeafCertificate( + ctx, + cluster, + secretName, + cluster.GetServiceReadWriteName(), + caSecret, + certs.CertTypeServer, + cluster.GetClusterAltDNSNames(), + nil, + ) } var serverSecret v1.Secret - err := r.Get(ctx, secretName, &serverSecret) - if err != nil { + if err := r.Get(ctx, secretName, &serverSecret); apierrors.IsNotFound(err) { + return fmt.Errorf("missing specified server TLS secret %s: %w", + cluster.Status.Certificates.ServerTLSSecret, err) + } else if err != nil { return err } + opts := &x509.VerifyOptions{KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}} return validateLeafCertificate(caSecret, &serverSecret, opts) } @@ -279,24 +265,37 @@ func (r *ClusterReconciler) ensureServerLeafCertificate( func (r *ClusterReconciler) ensureReplicationClientLeafCertificate( ctx context.Context, cluster *apiv1.Cluster, - secretName client.ObjectKey, - commonName string, caSecret *v1.Secret, - usage certs.CertType, - altDNSNames []string, - opts *x509.VerifyOptions, ) error { + // Generating postgres client certificate + replicationSecretObjectKey := client.ObjectKey{ + Namespace: cluster.GetNamespace(), + Name: cluster.GetReplicationSecretName(), + } + // If not specified generate/renew if cluster.Spec.Certificates == nil || cluster.Spec.Certificates.ReplicationTLSSecret == "" { - return r.ensureLeafCertificate(ctx, cluster, secretName, commonName, caSecret, usage, altDNSNames, nil) + return r.ensureLeafCertificate( + ctx, + cluster, + replicationSecretObjectKey, + apiv1.StreamingReplicationUser, + caSecret, + certs.CertTypeClient, + nil, + nil, + ) } var replicationClientSecret v1.Secret - err := r.Get(ctx, secretName, &replicationClientSecret) - if err != nil { + if err := r.Get(ctx, replicationSecretObjectKey, &replicationClientSecret); apierrors.IsNotFound(err) { + return fmt.Errorf("missing specified replication TLS secret %s: %w", + replicationSecretObjectKey.Name, err) + } else if err != nil { return err } + opts := &x509.VerifyOptions{KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}} return validateLeafCertificate(caSecret, &replicationClientSecret, opts) } @@ -329,23 +328,26 @@ func (r *ClusterReconciler) ensureLeafCertificate( ) error { var secret v1.Secret err := r.Get(ctx, secretName, &secret) - if err == nil { + switch { + case err == nil: return r.renewAndUpdateCertificate(ctx, caSecret, &secret, altDNSNames) - } - - serverSecret, err := generateCertificateFromCA(caSecret, commonName, usage, altDNSNames, secretName) - if err != nil { - return err - } + case apierrors.IsNotFound(err): + serverSecret, err := generateCertificateFromCA(caSecret, commonName, usage, altDNSNames, secretName) + if err != nil { + return err + } - utils.SetAsOwnedBy(&serverSecret.ObjectMeta, cluster.ObjectMeta, cluster.TypeMeta) - for k, v := range additionalLabels { - if serverSecret.Labels == nil { - serverSecret.Labels = make(map[string]string) + utils.SetAsOwnedBy(&serverSecret.ObjectMeta, cluster.ObjectMeta, cluster.TypeMeta) + for k, v := range additionalLabels { + if serverSecret.Labels == nil { + serverSecret.Labels = make(map[string]string) + } + 
serverSecret.Labels[k] = v } - serverSecret.Labels[k] = v + return r.Create(ctx, serverSecret) + default: + return err } - return r.Create(ctx, serverSecret) } // generateCertificateFromCA create a certificate secret using the provided CA secret diff --git a/internal/controller/cluster_plugins.go b/internal/controller/cluster_plugins.go index 2b6b31f4a0..56e9b435e0 100644 --- a/internal/controller/cluster_plugins.go +++ b/internal/controller/cluster_plugins.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the controller of the CRD @@ -24,13 +27,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + cnpgiclient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" ) // updatePluginsStatus ensures that we load the plugins that are required to reconcile // this cluster func (r *ClusterReconciler) updatePluginsStatus(ctx context.Context, cluster *apiv1.Cluster) error { // Load the plugins - pluginClient := getPluginClientFromContext(ctx) + pluginClient := cnpgiclient.GetPluginClientFromContext(ctx) // Get the status of the plugins and store it inside the status section oldCluster := cluster.DeepCopy() @@ -43,6 +47,7 @@ func (r *ClusterReconciler) updatePluginsStatus(ctx context.Context, cluster *ap cluster.Status.PluginStatus[i].OperatorCapabilities = entry.OperatorCapabilities cluster.Status.PluginStatus[i].WALCapabilities = entry.WALCapabilities cluster.Status.PluginStatus[i].BackupCapabilities = entry.BackupCapabilities + cluster.Status.PluginStatus[i].RestoreJobHookCapabilities = entry.RestoreJobHookCapabilities } // If nothing changes, there's no need to hit the API server diff --git a/internal/controller/cluster_predicates.go b/internal/controller/cluster_predicates.go index a8e96dfde4..8647337f43 100644 --- a/internal/controller/cluster_predicates.go +++ b/internal/controller/cluster_predicates.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller import ( + "slices" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -69,12 +74,42 @@ var ( return isUsefulClusterSecret(e.ObjectNew) }, } +) - nodesPredicate = predicate.Funcs{ +func (r *ClusterReconciler) nodesPredicate() predicate.Funcs { + return predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { oldNode, oldOk := e.ObjectOld.(*corev1.Node) newNode, newOk := e.ObjectNew.(*corev1.Node) - return oldOk && newOk && oldNode.Spec.Unschedulable != newNode.Spec.Unschedulable + if !oldOk || !newOk { + return false + } + + if oldNode.Spec.Unschedulable != newNode.Spec.Unschedulable { + return true + } + + // check if any of the watched drain taints have changed. + for _, taint := range r.drainTaints { + oldTaintIndex := slices.IndexFunc(oldNode.Spec.Taints, func(t corev1.Taint) bool { return t.Key == taint }) + newTaintIndex := slices.IndexFunc(newNode.Spec.Taints, func(t corev1.Taint) bool { return t.Key == taint }) + + switch { + case oldTaintIndex == -1 && newTaintIndex == -1: + continue + case oldTaintIndex == -1 || newTaintIndex == -1: + return true + } + + // exists in both - check if value or effect is different + oldTaint := oldNode.Spec.Taints[oldTaintIndex] + newTaint := newNode.Spec.Taints[newTaintIndex] + if oldTaint.Value != newTaint.Value || oldTaint.Effect != newTaint.Effect { + return true + } + } + + return false }, CreateFunc: func(_ event.CreateEvent) bool { return false @@ -86,7 +121,7 @@ var ( return false }, } -) +} func isOwnedByClusterOrSatisfiesPredicate( object client.Object, diff --git a/internal/controller/cluster_predicates_test.go b/internal/controller/cluster_predicates_test.go new file mode 100644 index 0000000000..6eb4553696 --- /dev/null +++ b/internal/controller/cluster_predicates_test.go @@ -0,0 +1,149 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("nodesPredicate", func() { + fakeReconciler := &ClusterReconciler{ + drainTaints: configuration.DefaultDrainTaints, + } + nodesPredicateFunctions := fakeReconciler.nodesPredicate() + + pod := &corev1.Pod{} + nodeWithNoTaints := &corev1.Node{} + unschedulableNode := &corev1.Node{ + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + } + nodeWithKarpenterNoSchedulableTaint := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "karpenter.sh/disrupted", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + } + nodeWithKarpenterNoExecuteTaint := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "karpenter.sh/disrupted", + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + } + nodeWithAutoscalerTaint := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "ToBeDeletedByClusterAutoscaler", + }, + }, + }, + } + + DescribeTable( + "always skips node creation", + func(node client.Object, expectedResult bool) { + createEvent := event.CreateEvent{ + Object: node, + } + + result := nodesPredicateFunctions.Create(createEvent) + Expect(result).To(Equal(expectedResult)) + }, + Entry("with a node", nodeWithNoTaints, false), + Entry("with a pod", pod, false), + ) + + DescribeTable( + "always skips node delete", + func(node client.Object, expectedResult bool) { + deleteEvent := event.DeleteEvent{ + Object: node, + } + + result := nodesPredicateFunctions.Delete(deleteEvent) + Expect(result).To(Equal(expectedResult)) + }, + Entry("with a node", nodeWithNoTaints, false), + Entry("with a pod", pod, false), + ) + + DescribeTable( + "always skips generic events", + func(node client.Object, expectedResult bool) { + genericEvent := event.GenericEvent{ + Object: node, + } + + result := nodesPredicateFunctions.Generic(genericEvent) + Expect(result).To(Equal(expectedResult)) + }, + Entry("with a node", nodeWithNoTaints, false), + Entry("with a pod", pod, false), + ) + + DescribeTable( + "node updates", + func(objectOld, objectNew client.Object, expectedResult bool) { + updateEventOldToNew := event.UpdateEvent{ + ObjectOld: objectOld, + ObjectNew: objectNew, + } + updateEventNewToOld := event.UpdateEvent{ + ObjectOld: objectOld, + ObjectNew: objectNew, + } + + result := nodesPredicateFunctions.Update(updateEventOldToNew) + Expect(result).To(Equal(expectedResult)) + + result = nodesPredicateFunctions.Update(updateEventNewToOld) + Expect(result).To(Equal(expectedResult)) + }, + Entry("with the same node", + nodeWithNoTaints, nodeWithNoTaints, false), + Entry("with the same tainted node", + nodeWithKarpenterNoSchedulableTaint, nodeWithKarpenterNoSchedulableTaint, false), + Entry("when a node becomes unschedulable", + nodeWithNoTaints, unschedulableNode, true), + Entry("when a node gets the karpenter disruption taint", + nodeWithNoTaints, nodeWithKarpenterNoSchedulableTaint, true), + Entry("when a node gets the karpenter disruption taint value changed", + nodeWithKarpenterNoSchedulableTaint, nodeWithKarpenterNoExecuteTaint, true), + Entry("when a node taints changed", + nodeWithKarpenterNoSchedulableTaint, nodeWithAutoscalerTaint, true), + ) +}) diff --git a/internal/controller/cluster_restore.go b/internal/controller/cluster_restore.go index 8edd37eb30..7205e50815 100644 --- a/internal/controller/cluster_restore.go +++ b/internal/controller/cluster_restore.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a 
Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -308,7 +311,7 @@ func getNodeSerialsFromPVCs( highestSerial = serial } - instanceRole, _ := utils.GetInstanceRole(pvc.ObjectMeta.Labels) + instanceRole, _ := utils.GetInstanceRole(pvc.Labels) if instanceRole == specs.ClusterRoleLabelPrimary { primarySerial = serial } diff --git a/internal/controller/cluster_restore_test.go b/internal/controller/cluster_restore_test.go index 616b0cf882..fc67aa18ed 100644 --- a/internal/controller/cluster_restore_test.go +++ b/internal/controller/cluster_restore_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -37,19 +40,17 @@ import ( var _ = Describe("ensureClusterIsNotFenced", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster ) - getCluster := func(clusterKey k8client.ObjectKey) (*apiv1.Cluster, error) { + getCluster := func(ctx context.Context, clusterKey k8client.ObjectKey) (*apiv1.Cluster, error) { remoteCluster := &apiv1.Cluster{} err := mockCli.Get(ctx, clusterKey, remoteCluster) return remoteCluster, err } BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -67,14 +68,14 @@ var _ = Describe("ensureClusterIsNotFenced", func() { }) Context("when no instances are fenced", func() { - It("should not modify the object", func() { - origCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + It("should not modify the object", func(ctx SpecContext) { + origCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) err = ensureClusterIsNotFenced(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) - remoteCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + remoteCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(remoteCluster.ObjectMeta).To(Equal(origCluster.ObjectMeta)) }) @@ -91,15 +92,15 @@ var _ = Describe("ensureClusterIsNotFenced", func() { Build() }) - It("should patch the cluster and remove fenced instances", func() { - origCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + It("should patch the cluster and remove fenced instances", func(ctx SpecContext) { + origCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(origCluster.Annotations).To(HaveKey(utils.FencedInstanceAnnotation)) err = ensureClusterIsNotFenced(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) - remoteCluster, err := 
getCluster(k8client.ObjectKeyFromObject(cluster)) + remoteCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(remoteCluster.ObjectMeta).ToNot(Equal(origCluster.ObjectMeta)) @@ -110,13 +111,11 @@ var _ = Describe("ensureClusterIsNotFenced", func() { var _ = Describe("restoreClusterStatus", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster ) BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -131,7 +130,7 @@ var _ = Describe("restoreClusterStatus", func() { }) Context("when restoring cluster status", func() { - It("should patch the cluster with the updated status", func() { + It("should patch the cluster with the updated status", func(ctx SpecContext) { latestNodeSerial := 10 targetPrimaryNodeSerial := 3 @@ -151,7 +150,6 @@ var _ = Describe("restoreClusterStatus", func() { var _ = Describe("getOrphanPVCs", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster goodPvcs []corev1.PersistentVolumeClaim @@ -159,7 +157,6 @@ var _ = Describe("getOrphanPVCs", func() { ) BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -268,7 +265,7 @@ var _ = Describe("getOrphanPVCs", func() { Build() }) - It("should fetch only the pvcs that belong to the cluster and without an owner", func() { + It("should fetch only the pvcs that belong to the cluster and without an owner", func(ctx SpecContext) { remotePvcs, err := getOrphanPVCs(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) Expect(remotePvcs).To(HaveLen(len(goodPvcs))) @@ -290,7 +287,7 @@ var _ = Describe("getOrphanPVCs", func() { Expect(primary).To(Equal(2)) }) - It("should correctly restore the orphan pvcs", func() { + It("should correctly restore the orphan pvcs", func(ctx SpecContext) { err := restoreOrphanPVCs(ctx, mockCli, cluster, goodPvcs) Expect(err).ToNot(HaveOccurred()) @@ -489,7 +486,7 @@ var _ = Describe("ensureOrphanServicesAreNotPresent", func() { Namespace: cluster.Namespace, }, } - cluster.TypeMeta = metav1.TypeMeta{Kind: apiv1.ClusterKind, APIVersion: apiv1.GroupVersion.String()} + cluster.TypeMeta = metav1.TypeMeta{Kind: apiv1.ClusterKind, APIVersion: apiv1.SchemeGroupVersion.String()} cluster.SetInheritedDataAndOwnership(&svc.ObjectMeta) mockCli = fake.NewClientBuilder(). WithScheme(k8scheme.BuildWithAllKnownScheme()). diff --git a/internal/controller/cluster_scale.go b/internal/controller/cluster_scale.go index d4244a3b24..ae7889a96e 100644 --- a/internal/controller/cluster_scale.go +++ b/internal/controller/cluster_scale.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -29,7 +32,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // scaleDownCluster handles the scaling down operations of a PostgreSQL cluster. @@ -98,13 +101,25 @@ func (r *ClusterReconciler) ensureInstanceJobAreDeleted( ) error { contextLogger := log.FromContext(ctx) - for _, jobName := range specs.GetPossibleJobNames(instanceName) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: cluster.Namespace, - }, - } + var jobList batchv1.JobList + if err := r.List( + ctx, + &jobList, + client.InNamespace(cluster.Namespace), + client.MatchingFields{jobOwnerKey: cluster.Name}, + client.MatchingLabels{ + utils.InstanceNameLabelName: instanceName, + utils.ClusterLabelName: cluster.Name, + }, + client.HasLabels{ + utils.JobRoleLabelName, + }, + ); err != nil { + return fmt.Errorf("while looking for stale jobs of instance %s: %w", instanceName, err) + } + + for i := range jobList.Items { + job := &jobList.Items[i] // This job was working against the PVC of this Pod, // let's remove it diff --git a/internal/controller/cluster_scale_test.go b/internal/controller/cluster_scale_test.go index 6b01ab1854..7ee3ed3b07 100644 --- a/internal/controller/cluster_scale_test.go +++ b/internal/controller/cluster_scale_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -30,7 +33,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -169,43 +172,66 @@ var _ = Describe("cluster scale pod and job deletion logic", func() { ) BeforeEach(func() { - fakeClientSet = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build() + fakeClientSet = fake. + NewClientBuilder(). + WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithIndex(&batchv1.Job{}, jobOwnerKey, jobOwnerIndexFunc). 
+ Build() ctx, cancel = context.WithCancel(context.Background()) reconciler = &ClusterReconciler{ Client: fakeClientSet, } + instanceName = "test-instance" + cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", Namespace: "default", }, } - - instanceName = "test-instance" + cluster.TypeMeta = metav1.TypeMeta{ + Kind: apiv1.ClusterKind, + APIVersion: apiv1.SchemeGroupVersion.String(), + } }) AfterEach(func() { cancel() }) - It("should delete all the jobs", func() { - for _, jobName := range specs.GetPossibleJobNames(instanceName) { + It("creates the cluster", func(ctx SpecContext) { + err := fakeClientSet.Create(ctx, cluster) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should delete all the jobs", func(ctx SpecContext) { + jobNames := []string{ + cluster.Name + "-initdb", + cluster.Name + "-pgbasebackup", + } + for _, jobName := range jobNames { job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, Namespace: cluster.Namespace, + Labels: map[string]string{ + utils.InstanceNameLabelName: instanceName, + utils.ClusterLabelName: cluster.Name, + utils.JobRoleLabelName: "test", + }, }, } - err := fakeClientSet.Create(context.TODO(), job) + cluster.SetInheritedDataAndOwnership(&job.ObjectMeta) + err := fakeClientSet.Create(ctx, job) Expect(err).NotTo(HaveOccurred()) } err := reconciler.ensureInstanceJobAreDeleted(ctx, cluster, instanceName) Expect(err).NotTo(HaveOccurred()) - for _, jobName := range specs.GetPossibleJobNames(instanceName) { + for _, jobName := range jobNames { var expectedJob batchv1.Job err = fakeClientSet.Get(context.Background(), types.NamespacedName{Name: jobName, Namespace: cluster.Namespace}, diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index f766fdd807..74c4d9da89 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -24,9 +27,12 @@ import ( "sort" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "github.com/cloudnative-pg/machinery/pkg/stringset" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/strings/slices" "sigs.k8s.io/controller-runtime/pkg/client" @@ -242,6 +248,7 @@ func (r *ClusterReconciler) updateResourceStatus( cluster *apiv1.Cluster, resources *managedResources, ) error { + contextLogger := log.FromContext(ctx) // Retrieve the cluster key existingClusterStatus := cluster.Status @@ -297,7 +304,7 @@ func (r *ClusterReconciler) updateResourceStatus( "targetPrimary", cluster.Status.TargetPrimary, "instances", resources.instances) cluster.Status.TargetPrimary = cluster.Status.CurrentPrimary - cluster.Status.TargetPrimaryTimestamp = utils.GetCurrentTimestamp() + cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp() } } @@ -316,7 +323,7 @@ func (r *ClusterReconciler) updateResourceStatus( if poolerIntegrations, err := r.getPoolerIntegrationsNeeded(ctx, cluster); err == nil { cluster.Status.PoolerIntegrations = poolerIntegrations } else { - log.Error(err, "while checking pooler integrations were needed, ignored") + contextLogger.Error(err, "while checking pooler integrations were needed, ignored") } // Set the current hash code of the operator binary inside the status. @@ -717,9 +724,10 @@ func (r *ClusterReconciler) setPrimaryInstance( cluster *apiv1.Cluster, podName string, ) error { + origCluster := cluster.DeepCopy() cluster.Status.TargetPrimary = podName - cluster.Status.TargetPrimaryTimestamp = utils.GetCurrentTimestamp() - return r.Status().Update(ctx, cluster) + cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp() + return r.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)) } // RegisterPhase update phase in the status cluster with the @@ -729,7 +737,13 @@ func (r *ClusterReconciler) RegisterPhase(ctx context.Context, phase string, reason string, ) error { - return status.RegisterPhase(ctx, r.Client, cluster, phase, reason) + return status.PatchWithOptimisticLock( + ctx, + r.Client, + cluster, + status.SetPhase(phase, reason), + status.SetClusterReadyCondition, + ) } // updateClusterStatusThatRequiresInstancesState updates all the cluster status fields that require the instances status @@ -746,16 +760,58 @@ func (r *ClusterReconciler) updateClusterStatusThatRequiresInstancesState( cluster.Status.InstancesReportedState[apiv1.PodName(item.Pod.Name)] = apiv1.InstanceReportedState{ IsPrimary: item.IsPrimary, TimeLineID: item.TimeLineID, + IP: item.Pod.Status.PodIP, } } // we update any relevant cluster status that depends on the primary instance + detectedSystemID := stringset.New() for _, item := range statuses.Items { // we refresh the last known timeline on the status root. // This avoids to have a zero timeline id in case that no primary instance is up during reconciliation. if item.IsPrimary && item.TimeLineID != 0 { cluster.Status.TimelineID = item.TimeLineID } + if item.SystemID != "" { + detectedSystemID.Put(item.SystemID) + } + } + + // we update the system ID field in the cluster status + switch detectedSystemID.Len() { + case 0: + cluster.Status.SystemID = "" + + message := "No instances are present in the cluster to report a system ID." 
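+		// Refine the message when instances exist but none of them has
+		// reported a system ID yet, to distinguish this case from an
+		// empty cluster.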
+		if len(statuses.Items) > 0 {
+			message = "Instances are present, but none have reported a system ID."
+		}
+
+		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+			Type:    string(apiv1.ConditionConsistentSystemID),
+			Status:  metav1.ConditionFalse,
+			Reason:  "NotFound",
+			Message: message,
+		})
+
+	case 1:
+		cluster.Status.SystemID = detectedSystemID.ToList()[0]
+		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+			Type:    string(apiv1.ConditionConsistentSystemID),
+			Status:  metav1.ConditionTrue,
+			Reason:  "Unique",
+			Message: "A single, unique system ID was found across reporting instances.",
+		})
+
+	default:
+		// the instances are reporting an inconsistent system ID
+		cluster.Status.SystemID = ""
+		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+			Type:    string(apiv1.ConditionConsistentSystemID),
+			Status:  metav1.ConditionFalse,
+			Reason:  "Mismatch",
+			Message: fmt.Sprintf("Multiple differing system IDs reported by instances: %q", detectedSystemID.ToSortedList()),
+		})
 	}
 
 	if !reflect.DeepEqual(existingClusterStatus, cluster.Status) {
@@ -803,7 +859,19 @@ func isWALSpaceAvailableOnPod(pod *corev1.Pod) bool {
 	isTerminatedForMissingWALDiskSpace := func(state *corev1.ContainerState) bool {
 		return state.Terminated != nil && state.Terminated.ExitCode == apiv1.MissingWALDiskSpaceExitCode
 	}
+	return hasPostgresContainerTerminationReason(pod, isTerminatedForMissingWALDiskSpace)
+}
+
+// isTerminatedBecauseOfMissingWALArchivePlugin checks if a Pod terminated because the
+// WAL archiving plugin was missing when the Pod started
+func isTerminatedBecauseOfMissingWALArchivePlugin(pod *corev1.Pod) bool {
+	isTerminatedForMissingWALArchivePlugin := func(state *corev1.ContainerState) bool {
+		return state.Terminated != nil && state.Terminated.ExitCode == apiv1.MissingWALArchivePlugin
+	}
+	return hasPostgresContainerTerminationReason(pod, isTerminatedForMissingWALArchivePlugin)
+}
+func hasPostgresContainerTerminationReason(pod *corev1.Pod, reason func(state *corev1.ContainerState) bool) bool {
 	var pgContainerStatus *corev1.ContainerStatus
 	for i := range pod.Status.ContainerStatuses {
 		status := pod.Status.ContainerStatuses[i]
@@ -821,14 +889,14 @@ func isWALSpaceAvailableOnPod(pod *corev1.Pod) bool {
 
 	// If the Pod was terminated because it didn't have enough disk
 	// space, then we have no disk space
-	if isTerminatedForMissingWALDiskSpace(&pgContainerStatus.State) {
+	if reason(&pgContainerStatus.State) {
 		return false
 	}
 
 	// The Pod is now running but not still ready, and last time it
 	// was terminated for missing disk space. Let's wait for it
 	// to be ready before classifying it as having enough disk space
-	if !pgContainerStatus.Ready && isTerminatedForMissingWALDiskSpace(&pgContainerStatus.LastTerminationState) {
+	if !pgContainerStatus.Ready && reason(&pgContainerStatus.LastTerminationState) {
 		return false
 	}
diff --git a/internal/controller/cluster_status_test.go b/internal/controller/cluster_status_test.go
index d7e24c8489..4e9d4786b3 100644
--- a/internal/controller/cluster_status_test.go
+++ b/internal/controller/cluster_status_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -21,11 +24,14 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" . "github.com/onsi/ginkgo/v2" @@ -171,3 +177,214 @@ var _ = Describe("cluster_status unit tests", func() { }) }) }) + +var _ = Describe("updateClusterStatusThatRequiresInstancesState tests", func() { + var ( + env *testingEnvironment + cluster *v1.Cluster + ) + + BeforeEach(func() { + env = buildTestEnvironment() + cluster = newFakeCNPGCluster(env.client, newFakeNamespace(env.client)) + }) + + It("should handle empty status list", func(ctx SpecContext) { + statuses := postgres.PostgresqlStatusList{} + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(BeEmpty()) + Expect(cluster.Status.SystemID).To(BeEmpty()) + + condition := meta.FindStatusCondition(cluster.Status.Conditions, string(v1.ConditionConsistentSystemID)) + Expect(condition).ToNot(BeNil()) + Expect(condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(condition.Reason).To(Equal("NotFound")) + Expect(condition.Message).To(Equal("No instances are present in the cluster to report a system ID.")) + }) + + It("should handle instances without SystemID", func(ctx SpecContext) { + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: 123, + SystemID: "", + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + SystemID: "", + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(HaveLen(2)) + Expect(cluster.Status.TimelineID).To(Equal(123)) + Expect(cluster.Status.SystemID).To(BeEmpty()) + + condition := meta.FindStatusCondition(cluster.Status.Conditions, string(v1.ConditionConsistentSystemID)) + Expect(condition).ToNot(BeNil()) + Expect(condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(condition.Reason).To(Equal("NotFound")) + Expect(condition.Message).To(Equal("Instances are present, but none have reported a system ID.")) + }) + + It("should handle instances with a single SystemID", func(ctx SpecContext) { + const systemID = "system123" + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: 123, + SystemID: systemID, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: 
"pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + SystemID: systemID, + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(HaveLen(2)) + Expect(cluster.Status.TimelineID).To(Equal(123)) + Expect(cluster.Status.SystemID).To(Equal(systemID)) + + condition := meta.FindStatusCondition(cluster.Status.Conditions, string(v1.ConditionConsistentSystemID)) + Expect(condition).ToNot(BeNil()) + Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + Expect(condition.Reason).To(Equal("Unique")) + Expect(condition.Message).To(Equal("A single, unique system ID was found across reporting instances.")) + }) + + It("should handle instances with multiple SystemIDs", func(ctx SpecContext) { + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: 123, + SystemID: "system1", + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + SystemID: "system2", + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(HaveLen(2)) + Expect(cluster.Status.TimelineID).To(Equal(123)) + Expect(cluster.Status.SystemID).To(BeEmpty()) + + condition := meta.FindStatusCondition(cluster.Status.Conditions, string(v1.ConditionConsistentSystemID)) + Expect(condition).ToNot(BeNil()) + Expect(condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(condition.Reason).To(Equal("Mismatch")) + Expect(condition.Message).To(ContainSubstring("Multiple differing system IDs reported by instances:")) + Expect(condition.Message).To(ContainSubstring("system1")) + Expect(condition.Message).To(ContainSubstring("system2")) + }) + + It("should update timeline ID from the primary instance", func(ctx SpecContext) { + const timelineID = 999 + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: timelineID, + SystemID: "system1", + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + TimeLineID: 123, + SystemID: "system1", + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.TimelineID).To(Equal(timelineID)) + }) + + It("should correctly populate InstancesReportedState", func(ctx SpecContext) { + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: 123, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + TimeLineID: 123, + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + 
Expect(err).ToNot(HaveOccurred())
+
+		Expect(cluster.Status.InstancesReportedState).To(HaveLen(2))
+
+		state1 := cluster.Status.InstancesReportedState["pod-1"]
+		Expect(state1.IsPrimary).To(BeTrue())
+		Expect(state1.TimeLineID).To(Equal(123))
+		Expect(state1.IP).To(Equal("192.168.1.1"))
+
+		state2 := cluster.Status.InstancesReportedState["pod-2"]
+		Expect(state2.IsPrimary).To(BeFalse())
+		Expect(state2.TimeLineID).To(Equal(123))
+		Expect(state2.IP).To(Equal("192.168.1.2"))
+	})
+})
diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go
index 8e7d3c0245..53f68a0fa5 100644
--- a/internal/controller/cluster_upgrade.go
+++ b/internal/controller/cluster_upgrade.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package controller
@@ -30,9 +33,9 @@ import (
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
-	"github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
 )
@@ -42,6 +45,10 @@ import (
 // instance is not connected via streaming replication
 var errLogShippingReplicaElected = errors.New("log shipping replica elected as a new post-switchover primary")
 
+// errRolloutDelayed is raised when a pod rollout has been delayed because
+// of the operator configuration
+var errRolloutDelayed = errors.New("pod rollout delayed")
+
 type rolloutReason = string
 
 func (r *ClusterReconciler) rolloutRequiredInstances(
@@ -72,6 +79,19 @@
 			continue
 		}
 
+		managerResult := r.rolloutManager.CoordinateRollout(client.ObjectKeyFromObject(cluster), postgresqlStatus.Pod.Name)
+		if !managerResult.RolloutAllowed {
+			r.Recorder.Eventf(
+				cluster,
+				"Normal",
+				"RolloutDelayed",
+				"Rollout of pod %s has been delayed for %s",
+				postgresqlStatus.Pod.Name,
+				managerResult.TimeToWait.String(),
+			)
+			return false, errRolloutDelayed
+		}
+
 		restartMessage := fmt.Sprintf("Restarting instance %s, because: %s",
 			postgresqlStatus.Pod.Name, podRollout.reason)
 		if err := r.RegisterPhase(ctx, cluster, apiv1.PhaseUpgrade, restartMessage); err != nil {
@@ -105,6 +125,21 @@
 		return false, nil
 	}
 
+	managerResult := r.rolloutManager.CoordinateRollout(
+		client.ObjectKeyFromObject(cluster),
+		primaryPostgresqlStatus.Pod.Name)
+	if !managerResult.RolloutAllowed {
+		r.Recorder.Eventf(
+			cluster,
+			"Normal",
+			"RolloutDelayed",
+			"Rollout of pod %s has been delayed for %s",
+			primaryPostgresqlStatus.Pod.Name,
+			managerResult.TimeToWait.String(),
+		)
+		return false, errRolloutDelayed
+	}
+
 	return r.updatePrimaryPod(ctx, cluster, podList, *primaryPostgresqlStatus.Pod,
podRollout.canBeInPlace, podRollout.primaryForceRecreate, podRollout.reason) } @@ -223,7 +258,7 @@ func (r *ClusterReconciler) updateRestartAnnotation( primaryPod.Annotations = make(map[string]string) } primaryPod.Annotations[utils.ClusterRestartAnnotationName] = clusterRestart - if err := r.Client.Patch(ctx, &primaryPod, client.MergeFrom(original)); err != nil { + if err := r.Patch(ctx, &primaryPod, client.MergeFrom(original)); err != nil { return err } } @@ -236,10 +271,15 @@ type rollout struct { required bool canBeInPlace bool primaryForceRecreate bool - reason string + + needsChangeOperatorImage bool + needsChangeOperandImage bool + + reason string } type rolloutChecker func( + ctx context.Context, pod *corev1.Pod, cluster *apiv1.Cluster, ) (rollout, error) @@ -260,6 +300,7 @@ func isInstanceNeedingRollout( required: true, reason: fmt.Sprintf("pod '%s' is not reporting the executable hash", status.Pod.Name), + needsChangeOperatorImage: true, } } @@ -298,7 +339,7 @@ func isPodNeedingRollout( contextLogger := log.FromContext(ctx) applyCheckers := func(checkers map[string]rolloutChecker) rollout { for message, check := range checkers { - podRollout, err := check(pod, cluster) + podRollout, err := check(ctx, pod, cluster) if err != nil { contextLogger.Error(err, "while checking if pod needs rollout") continue @@ -315,11 +356,10 @@ func isPodNeedingRollout( } checkers := map[string]rolloutChecker{ - "pod has missing PVCs": checkHasMissingPVCs, - "pod has PVC requiring resizing": checkHasResizingPVC, - "pod projected volume is outdated": checkProjectedVolumeIsOutdated, - "pod image is outdated": checkPodImageIsOutdated, - "cluster has newer restart annotation": checkClusterHasNewerRestartAnnotation, + "pod has missing PVCs": checkHasMissingPVCs, + "pod projected volume is outdated": checkProjectedVolumeIsOutdated, + "pod image is outdated": checkPodImageIsOutdated, + "cluster has different restart annotation": checkClusterHasDifferentRestartAnnotation, } podRollout := applyCheckers(checkers) @@ -343,10 +383,10 @@ func isPodNeedingRollout( // These checks are subsumed by the PodSpec checker checkers = map[string]rolloutChecker{ - "pod environment is outdated": checkPodEnvironmentIsOutdated, - "pod scheduler is outdated": checkSchedulerIsOutdated, - "pod needs updated topology": checkPodNeedsUpdatedTopology, - "pod init container is outdated": checkPodInitContainerIsOutdated, + "pod environment is outdated": checkPodEnvironmentIsOutdated, + "pod scheduler is outdated": checkSchedulerIsOutdated, + "pod needs updated topology": checkPodNeedsUpdatedTopology, + "pod bootstrap container is outdated": checkPodBootstrapImage, } podRollout = applyCheckers(checkers) if podRollout.required { @@ -358,7 +398,7 @@ func isPodNeedingRollout( // check if the pod has a valid podSpec func hasValidPodSpec(pod *corev1.Pod) bool { - podSpecAnnotation, hasStoredPodSpec := pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName] + podSpecAnnotation, hasStoredPodSpec := pod.Annotations[utils.PodSpecAnnotationName] if !hasStoredPodSpec { return false } @@ -366,22 +406,7 @@ func hasValidPodSpec(pod *corev1.Pod) bool { return err == nil } -func checkHasResizingPVC(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { - if configuration.Current.EnableAzurePVCUpdates { - for _, pvcName := range cluster.Status.ResizingPVC { - // This code works on the assumption that the PVC begins with the name of the pod using it. 
- if persistentvolumeclaim.BelongToInstance(cluster, pod.Name, pvcName) { - return rollout{ - required: true, - reason: fmt.Sprintf("rebooting pod to complete the resizing of the following PVC: '%s'", pvcName), - }, nil - } - } - } - return rollout{}, nil -} - -func checkPodNeedsUpdatedTopology(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkPodNeedsUpdatedTopology(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { if reflect.DeepEqual(cluster.Spec.TopologySpreadConstraints, pod.Spec.TopologySpreadConstraints) { return rollout{}, nil } @@ -395,7 +420,7 @@ func checkPodNeedsUpdatedTopology(pod *corev1.Pod, cluster *apiv1.Cluster) (roll }, nil } -func checkSchedulerIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkSchedulerIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { if cluster.Spec.SchedulerName == "" || cluster.Spec.SchedulerName == pod.Spec.SchedulerName { return rollout{}, nil } @@ -410,7 +435,7 @@ func checkSchedulerIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, }, nil } -func checkProjectedVolumeIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkProjectedVolumeIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { isNilOrZero := func(vs *corev1.ProjectedVolumeSource) bool { return vs == nil || len(vs.Sources) == 0 } @@ -453,8 +478,8 @@ func getProjectedVolumeConfigurationFromPod(pod corev1.Pod) *corev1.ProjectedVol return nil } -func checkPodImageIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { - targetImageName := cluster.GetImageName() +func checkPodImageIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { + targetImageName := cluster.Status.Image pgCurrentImageName, err := specs.GetPostgresImageName(*pod) if err != nil { @@ -469,10 +494,11 @@ func checkPodImageIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, required: true, reason: fmt.Sprintf("the instance is using a different image: %s -> %s", pgCurrentImageName, targetImageName), + needsChangeOperandImage: true, }, nil } -func checkPodInitContainerIsOutdated(pod *corev1.Pod, _ *apiv1.Cluster) (rollout, error) { +func checkPodBootstrapImage(_ context.Context, pod *corev1.Pod, _ *apiv1.Cluster) (rollout, error) { if configuration.Current.EnableInstanceManagerInplaceUpdates { return rollout{}, nil } @@ -489,12 +515,13 @@ func checkPodInitContainerIsOutdated(pod *corev1.Pod, _ *apiv1.Cluster) (rollout // We need to apply a different version of the instance manager return rollout{ required: true, - reason: fmt.Sprintf("the instance is using an old init container image: %s -> %s", + reason: fmt.Sprintf("the instance is using an old bootstrap container image: %s -> %s", opCurrentImageName, configuration.Current.OperatorImageName), + needsChangeOperatorImage: true, }, nil } -func checkHasMissingPVCs(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkHasMissingPVCs(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { if persistentvolumeclaim.InstanceHasMissingMounts(cluster, pod) { return rollout{ required: true, @@ -505,19 +532,19 @@ func checkHasMissingPVCs(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, erro return rollout{}, nil } -func checkClusterHasNewerRestartAnnotation(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { - // check if pod needs to be restarted because of some config requiring it - 
// or if the cluster have been explicitly restarted
-	// If the cluster has been restarted and we are working with a Pod
-	// which has not been restarted yet, or restarted at a different
-	// time, let's restart it.
+func checkClusterHasDifferentRestartAnnotation(
+	_ context.Context,
+	pod *corev1.Pod,
+	cluster *apiv1.Cluster,
+) (rollout, error) {
+	// If the pod restart value doesn't match the one contained in the cluster, restart the pod.
 	if clusterRestart, ok := cluster.Annotations[utils.ClusterRestartAnnotationName]; ok {
 		podRestart := pod.Annotations[utils.ClusterRestartAnnotationName]
 		if clusterRestart != podRestart {
 			return rollout{
 				required:     true,
 				reason:       "cluster has been explicitly restarted via annotation",
-				canBeInPlace: true,
+				canBeInPlace: false,
 			}, nil
 		}
 	}
@@ -525,7 +552,9 @@ func checkClusterHasNewerRestartAnnotation(pod *corev1.Pod, cluster *apiv1.Clust
 	return rollout{}, nil
 }
 
-func checkPodEnvironmentIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) {
+// checkPodEnvironmentIsOutdated checks if the environment variables in the pod have changed.
+// Deprecated: this function doesn't take into account plugin changes, use PodSpec annotation.
+func checkPodEnvironmentIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) {
 	// Check if there is a change in the environment section
 	envConfig := specs.CreatePodEnvConfig(*cluster, pod.Name)
 
@@ -570,8 +599,12 @@ func checkPodEnvironmentIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rol
 	return rollout{}, nil
 }
 
-func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) {
-	podSpecAnnotation, ok := pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName]
+func checkPodSpecIsOutdated(
+	ctx context.Context,
+	pod *corev1.Pod,
+	cluster *apiv1.Cluster,
+) (rollout, error) {
+	podSpecAnnotation, ok := pod.Annotations[utils.PodSpecAnnotationName]
 	if !ok {
 		return rollout{}, nil
 	}
@@ -581,10 +614,18 @@ func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, e
 	if err != nil {
 		return rollout{}, fmt.Errorf("while unmarshaling the pod resources annotation: %w", err)
 	}
-	envConfig := specs.CreatePodEnvConfig(*cluster, pod.Name)
-	gracePeriod := int64(cluster.GetMaxStopDelay())
-	tlsEnabled := instance.GetStatusSchemeFromPod(pod).IsHTTPS()
-	targetPodSpec := specs.CreateClusterPodSpec(pod.Name, *cluster, envConfig, gracePeriod, tlsEnabled)
+
+	tlsEnabled := remote.GetStatusSchemeFromPod(pod).IsHTTPS()
+
+	serial, err := utils.GetClusterSerialValue(pod.Annotations)
+	if err != nil {
+		return rollout{}, fmt.Errorf("while getting the pod serial value: %w", err)
+	}
+
+	targetPod, err := specs.NewInstance(ctx, *cluster, serial, tlsEnabled)
+	if err != nil {
+		return rollout{}, fmt.Errorf("while creating a new pod to check podSpec: %w", err)
+	}
 
 	// the bootstrap init-container could change image after an operator upgrade.
 	// If in-place upgrades of the instance manager are enabled, we don't need rollout.
@@ -596,16 +637,13 @@ func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, e !configuration.Current.EnableInstanceManagerInplaceUpdates { return rollout{ required: true, - reason: fmt.Sprintf("the instance is using an old init container image: %s -> %s", + reason: fmt.Sprintf("the instance is using an old bootstrap container image: %s -> %s", opCurrentImageName, configuration.Current.OperatorImageName), + needsChangeOperatorImage: true, }, nil } - // from here we don't care about drift in the init containers: avoid checking them - storedPodSpec.InitContainers = nil - targetPodSpec.InitContainers = nil - - match, diff := specs.ComparePodSpecs(storedPodSpec, targetPodSpec) + match, diff := specs.ComparePodSpecs(storedPodSpec, targetPod.Spec) if !match { return rollout{ required: true, @@ -626,7 +664,7 @@ func (r *ClusterReconciler) upgradePod( ) error { log.FromContext(ctx).Info("Recreating instance pod", "pod", pod.Name, - "to", cluster.Spec.ImageName, + "to", cluster.Status.Image, "reason", reason, ) diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index 7898a8adf2..9b180df291 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,18 +13,24 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller import ( + "context" "encoding/json" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8client "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" @@ -48,6 +55,9 @@ var _ = Describe("Pod upgrade", Ordered, func() { Spec: apiv1.ClusterSpec{ ImageName: "postgres:13.11", }, + Status: apiv1.ClusterStatus{ + Image: "postgres:13.11", + }, } configuration.Current = configuration.NewConfiguration() }) @@ -57,7 +67,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("will not require a restart for just created Pods", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -71,7 +82,9 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires rollout when running a different image name", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) + pod.Spec.Containers[0].Image = "postgres:13.10" status := postgres.PostgresqlStatus{ Pod: pod, @@ -81,10 +94,13 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(BeEquivalentTo("the instance is using a different image: postgres:13.10 -> postgres:13.11")) + Expect(rollout.needsChangeOperandImage).To(BeTrue()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("requires rollout when a restart annotation has been added to the cluster", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) clusterRestart := cluster clusterRestart.Annotations = make(map[string]string) clusterRestart.Annotations[utils.ClusterRestartAnnotationName] = "now" @@ -98,7 +114,9 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &clusterRestart) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("cluster has been explicitly restarted via annotation")) - Expect(rollout.canBeInPlace).To(BeTrue()) + Expect(rollout.canBeInPlace).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) rollout = isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeFalse()) @@ -106,7 +124,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("should prioritize full rollout over inplace restarts", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -136,7 +155,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires rollout when PostgreSQL needs to be restarted", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := 
specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -157,10 +177,13 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout = isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("Postgres needs a restart to apply some configuration changes")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("requires pod rollout if executable does not have a hash", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, PendingRestart: false, @@ -170,11 +193,14 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("pod 'test-1' is not reporting the executable hash")) Expect(rollout.canBeInPlace).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeTrue()) }) It("checkPodSpecIsOutdated should not return any error", func() { - pod := specs.PodWithExistingStorage(cluster, 1) - rollout, err := checkPodSpecIsOutdated(pod, &cluster) + pod, err := specs.NewInstance(context.TODO(), cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) + rollout, err := checkPodSpecIsOutdated(context.TODO(), pod, &cluster) Expect(rollout.required).To(BeFalse()) Expect(rollout.canBeInPlace).To(BeFalse()) Expect(rollout.reason).To(BeEmpty()) @@ -182,7 +208,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("checks when a rollout is needed for any reason", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, PendingRestart: true, @@ -202,11 +229,14 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(BeEquivalentTo("Postgres needs a restart to apply some configuration changes")) Expect(rollout.canBeInPlace).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) When("the PodSpec annotation is not available", func() { It("should trigger a rollout when the scheduler changes", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" delete(pod.Annotations, utils.PodSpecAnnotationName) @@ -220,6 +250,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("scheduler name changed")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) }) @@ -229,7 +261,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" status := postgres.PostgresqlStatus{ @@ -242,6 +275,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &cluster) 
Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("scheduler-name")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) When("cluster has resources specified", func() { @@ -258,7 +293,13 @@ var _ = Describe("Pod upgrade", Ordered, func() { }, } It("should trigger a rollout when the cluster has a Resource changed", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(clusterWithResources, 1) + pod, err := specs.NewInstance( + context.TODO(), + clusterWithResources, + 1, + true, + ) + Expect(err).ToNot(HaveOccurred()) clusterWithResources.Spec.Resources.Limits["cpu"] = resource.MustParse("3") // was "2" status := postgres.PostgresqlStatus{ @@ -272,9 +313,12 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("original and target PodSpec differ in containers")) Expect(rollout.reason).To(ContainSubstring("container postgres differs in resources")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should trigger a rollout when the cluster has Resources deleted from spec", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(clusterWithResources, 1) + pod, err := specs.NewInstance(context.TODO(), clusterWithResources, 1, true) + Expect(err).ToNot(HaveOccurred()) clusterWithResources.Spec.Resources = corev1.ResourceRequirements{} status := postgres.PostgresqlStatus{ @@ -288,12 +332,15 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("original and target PodSpec differ in containers")) Expect(rollout.reason).To(ContainSubstring("container postgres differs in resources")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) }) When("the PodSpec annotation is not available", func() { It("detects when a new custom environment variable is set", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) cluster := cluster.DeepCopy() @@ -313,6 +360,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("environment variable configuration hash changed")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should not trigger a rollout on operator changes with inplace upgrades", func(ctx SpecContext) { @@ -321,7 +370,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) status := postgres.PostgresqlStatus{ @@ -345,7 +395,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) status := postgres.PostgresqlStatus{ @@ -359,14 +410,17 @@ var _ = Describe("Pod upgrade", Ordered, func() { 
configuration.Current.OperatorImageName = newOperatorImage configuration.Current.EnableInstanceManagerInplaceUpdates = false rollout := isInstanceNeedingRollout(ctx, status, &cluster) - Expect(rollout.reason).To(ContainSubstring("the instance is using an old init container image")) + Expect(rollout.reason).To(ContainSubstring("the instance is using an old bootstrap container image")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeTrue()) }) }) When("the podSpec annotation is available", func() { It("detects when a new custom environment variable is set", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) cluster := cluster.DeepCopy() cluster.Spec.Env = []corev1.EnvVar{ @@ -386,6 +440,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("original and target PodSpec differ in containers")) Expect(rollout.reason).To(ContainSubstring("container postgres differs in environment")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should not trigger a rollout on operator changes with inplace upgrades", func(ctx SpecContext) { @@ -394,7 +450,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -417,7 +474,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -430,8 +488,10 @@ var _ = Describe("Pod upgrade", Ordered, func() { configuration.Current.OperatorImageName = newOperatorImage configuration.Current.EnableInstanceManagerInplaceUpdates = false rollout := isInstanceNeedingRollout(ctx, status, &cluster) - Expect(rollout.reason).To(ContainSubstring("the instance is using an old init container image")) + Expect(rollout.reason).To(ContainSubstring("the instance is using an old bootstrap container image")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeTrue()) }) }) @@ -441,7 +501,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { cluster.Spec.ProjectedVolumeTemplate = &corev1.ProjectedVolumeSource{ Sources: []corev1.VolumeProjection{}, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, IsPodReady: true, @@ -458,7 +519,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { cluster.Spec.ProjectedVolumeTemplate = &corev1.ProjectedVolumeSource{ Sources: nil, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, IsPodReady: true, @@ -473,7 +535,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { It("should not require rollout if projected volume is nil", func(ctx SpecContext) { cluster.Spec.ProjectedVolumeTemplate = nil - pod := 
specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, IsPodReady: true, @@ -505,7 +568,9 @@ var _ = Describe("Test pod rollout due to topology", func() { TopologySpreadConstraints: []corev1.TopologySpreadConstraint{topology}, }, } - pod = specs.PodWithExistingStorage(*cluster, 1) + var err error + pod, err = specs.NewInstance(context.TODO(), *cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) }) When("the original podSpec annotation is available", func() { @@ -532,6 +597,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("topology-spread-constraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when the LabelSelector maps are different", func(ctx SpecContext) { @@ -547,6 +614,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("topology-spread-constraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when TopologySpreadConstraints is nil in one of the objects", func(ctx SpecContext) { @@ -560,11 +629,15 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("topology-spread-constraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should not require rollout if pod and spec both lack TopologySpreadConstraints", func(ctx SpecContext) { cluster.Spec.TopologySpreadConstraints = nil - pod = specs.PodWithExistingStorage(*cluster, 1) + var err error + pod, err = specs.NewInstance(context.TODO(), *cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) Expect(pod.Spec.TopologySpreadConstraints).To(BeNil()) status := postgres.PostgresqlStatus{ @@ -575,6 +648,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(BeEmpty()) Expect(rollout.required).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) }) @@ -588,6 +663,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(BeEmpty()) Expect(rollout.required).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when the cluster and pod do not have "+ @@ -603,6 +680,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("does not have up-to-date TopologySpreadConstraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when the LabelSelector maps are different", func(ctx SpecContext) { @@ -620,6 
+699,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("does not have up-to-date TopologySpreadConstraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when TopologySpreadConstraints is nil in one of the objects", func(ctx SpecContext) { @@ -635,6 +716,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("does not have up-to-date TopologySpreadConstraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should not require rollout if pod and spec both lack TopologySpreadConstraints", func(ctx SpecContext) { @@ -651,6 +734,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(BeEmpty()) Expect(rollout.required).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) }) }) @@ -677,14 +762,14 @@ var _ = Describe("hasValidPodSpec", func() { It("should return true", func() { podSpec := &corev1.PodSpec{} podSpecBytes, _ := json.Marshal(podSpec) - pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName] = string(podSpecBytes) + pod.Annotations[utils.PodSpecAnnotationName] = string(podSpecBytes) Expect(hasValidPodSpec(pod)).To(BeTrue()) }) }) Context("and the PodSpecAnnotation is invalid", func() { It("should return false", func() { - pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName] = "invalid JSON" + pod.Annotations[utils.PodSpecAnnotationName] = "invalid JSON" Expect(hasValidPodSpec(pod)).To(BeFalse()) }) }) @@ -709,9 +794,10 @@ var _ = Describe("Cluster upgrade with podSpec reconciliation disabled", func() }) It("skips the rollout if the annotation that disables PodSpec reconciliation is set", func(ctx SpecContext) { - cluster.ObjectMeta.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" + cluster.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" delete(pod.Annotations, utils.PodSpecAnnotationName) @@ -728,3 +814,108 @@ var _ = Describe("Cluster upgrade with podSpec reconciliation disabled", func() Expect(rollout.reason).To(BeEmpty()) }) }) + +type fakePluginClientRollout struct { + pluginClient.Client + returnedPod *corev1.Pod + returnedError error +} + +func (f fakePluginClientRollout) LifecycleHook( + _ context.Context, + _ plugin.OperationVerb, + _ k8client.Object, + _ k8client.Object, +) (k8client.Object, error) { + return f.returnedPod, f.returnedError +} + +var _ = Describe("checkPodSpec with plugins", Ordered, func() { + var cluster apiv1.Cluster + + BeforeEach(func() { + cluster = apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:13.11", + }, + } + configuration.Current = configuration.NewConfiguration() + }) + + AfterAll(func() { + configuration.Current = configuration.NewConfiguration() + }) + + It("image change", func() { + pod, err := 
specs.NewInstance(context.TODO(), cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) + + podModifiedByPlugins := pod.DeepCopy() + + podModifiedByPlugins.Spec.Containers[0].Image = "postgres:19.0" + + pluginCli := fakePluginClientRollout{ + returnedPod: podModifiedByPlugins, + } + + ctx := pluginClient.SetPluginClientInContext(context.TODO(), pluginCli) + + rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(rollout.required).To(BeTrue()) + Expect(rollout.reason).To(Equal( + "original and target PodSpec differ in containers: container postgres differs in image")) + }) + + It("init-container change", func() { + pod, err := specs.NewInstance(context.TODO(), cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) + + podModifiedByPlugins := pod.DeepCopy() + + podModifiedByPlugins.Spec.InitContainers = []corev1.Container{ + { + Name: "new-init-container", + Image: "postgres:19.0", + }, + } + + pluginCli := fakePluginClientRollout{ + returnedPod: podModifiedByPlugins, + } + ctx := pluginClient.SetPluginClientInContext(context.TODO(), pluginCli) + + rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(rollout.required).To(BeTrue()) + Expect(rollout.reason).To(Equal( + "original and target PodSpec differ in init-containers: container new-init-container has been added")) + }) + + It("environment variable change", func() { + pod, err := specs.NewInstance(context.TODO(), cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) + + podModifiedByPlugins := pod.DeepCopy() + + podModifiedByPlugins.Spec.Containers[0].Env = append(podModifiedByPlugins.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: "NEW_ENV", + Value: "new_value", + }) + + pluginCli := fakePluginClientRollout{ + returnedPod: podModifiedByPlugins, + } + ctx := pluginClient.SetPluginClientInContext(context.TODO(), pluginCli) + + rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(rollout.required).To(BeTrue()) + Expect(rollout.reason).To(Equal( + "original and target PodSpec differ in containers: container postgres differs in environment")) + }) +}) diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go new file mode 100644 index 0000000000..9d0e1467bb --- /dev/null +++ b/internal/controller/finalizers_delete.go @@ -0,0 +1,152 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + "errors" + + "github.com/cloudnative-pg/machinery/pkg/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// notifyDeletionToOwnedResources notifies the cluster deletion to the managed owned resources +func (r *ClusterReconciler) notifyDeletionToOwnedResources( + ctx context.Context, + namespacedName types.NamespacedName, +) error { + var dbList apiv1.DatabaseList + if err := r.List(ctx, &dbList, client.InNamespace(namespacedName.Namespace)); err != nil { + return err + } + + if err := notifyOwnedResourceDeletion( + ctx, + r.Client, + namespacedName, + toSliceWithPointers(dbList.Items), + utils.DatabaseFinalizerName, + ); err != nil { + return err + } + + var pbList apiv1.PublicationList + if err := r.List(ctx, &pbList, client.InNamespace(namespacedName.Namespace)); err != nil { + return err + } + + if err := notifyOwnedResourceDeletion( + ctx, + r.Client, + namespacedName, + toSliceWithPointers(pbList.Items), + utils.PublicationFinalizerName, + ); err != nil { + return err + } + + var sbList apiv1.SubscriptionList + if err := r.List(ctx, &sbList, client.InNamespace(namespacedName.Namespace)); err != nil { + return err + } + + return notifyOwnedResourceDeletion( + ctx, + r.Client, + namespacedName, + toSliceWithPointers(sbList.Items), + utils.SubscriptionFinalizerName, + ) +} + +// clusterOwnedResourceWithStatus is a kubernetes resource object owned by a cluster that has status +// capabilities +type clusterOwnedResourceWithStatus interface { + client.Object + GetClusterRef() corev1.LocalObjectReference + GetStatusMessage() string + SetAsFailed(err error) + SetStatusObservedGeneration(obsGeneration int64) +} + +func toSliceWithPointers[T any](items []T) []*T { + result := make([]*T, len(items)) + for i := range items { + result[i] = &items[i] + } + return result +} + +// notifyOwnedResourceDeletion deletes finalizers for a given resource type +func notifyOwnedResourceDeletion[T clusterOwnedResourceWithStatus]( + ctx context.Context, + cli client.Client, + namespacedName types.NamespacedName, + objects []T, + finalizerName string, +) error { + contextLogger := log.FromContext(ctx) + for _, obj := range objects { + itemLogger := contextLogger.WithValues( + "resourceKind", obj.GetObjectKind().GroupVersionKind().Kind, + "resourceName", obj.GetName(), + "finalizerName", finalizerName, + ) + if obj.GetClusterRef().Name != namespacedName.Name { + continue + } + + const statusMessage = "cluster resource has been deleted, skipping reconciliation" + + origObj := obj.DeepCopyObject().(T) + + if obj.GetStatusMessage() != statusMessage { + obj.SetAsFailed(errors.New(statusMessage)) + obj.SetStatusObservedGeneration(0) + // We need to use an update here because of the observed generation set to 0 + // that would be ignored with the patch method. 
+ if err := cli.Status().Update(ctx, obj); err != nil { + itemLogger.Error(err, "error while updating failed status for cluster deletion") + return err + } + } + + if controllerutil.RemoveFinalizer(obj, finalizerName) { + itemLogger.Debug("Removing finalizer from resource") + if err := cli.Patch(ctx, obj, client.MergeFrom(origObj)); err != nil { + itemLogger.Error( + err, + "while removing the finalizer", + "oldFinalizerList", origObj.GetFinalizers(), + "newFinalizerList", obj.GetFinalizers(), + ) + return err + } + } + } + + return nil +} diff --git a/internal/controller/finalizers_delete_test.go b/internal/controller/finalizers_delete_test.go new file mode 100644 index 0000000000..40b9793b5f --- /dev/null +++ b/internal/controller/finalizers_delete_test.go @@ -0,0 +1,341 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// nolint: dupl +var _ = Describe("Test cleanup of owned objects on cluster deletion", func() { + var ( + r ClusterReconciler + scheme *runtime.Scheme + namespacedName types.NamespacedName + ) + + BeforeEach(func() { + scheme = schemeBuilder.BuildWithAllKnownScheme() + r = ClusterReconciler{ + Scheme: scheme, + } + namespacedName = types.NamespacedName{ + Namespace: "test", + Name: "cluster", + } + }) + + It("should set databases on the cluster as failed and delete their finalizers", func(ctx SpecContext) { + databaseList := &apiv1.DatabaseList{ + Items: []apiv1.Database{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.DatabaseFinalizerName, + }, + Name: "db-1", + Namespace: "test", + }, + Spec: apiv1.DatabaseSpec{ + Name: "db-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + Status: apiv1.DatabaseStatus{ + Applied: ptr.To(true), + Message: "", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.DatabaseFinalizerName, + }, + Name: "db-2", + Namespace: "test", + }, + Spec: apiv1.DatabaseSpec{ + Name: "db-test-2", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList). 
+ WithStatusSubresource(&databaseList.Items[0], &databaseList.Items[1]).Build() + r.Client = cli + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + for _, db := range databaseList.Items { + database := &apiv1.Database{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&db), database) + Expect(err).ToNot(HaveOccurred()) + Expect(database.Finalizers).To(BeZero()) + Expect(database.Status.Applied).To(HaveValue(BeFalse())) + Expect(database.Status.Message).To(ContainSubstring("cluster resource has been deleted")) + } + }) + + It("should not delete database finalizers for databases in another cluster", + func(ctx SpecContext) { + databaseList := &apiv1.DatabaseList{ + Items: []apiv1.Database{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.DatabaseFinalizerName, + }, + Name: "db-1", + Namespace: "test", + }, + Spec: apiv1.DatabaseSpec{ + Name: "db-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "another-cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build() + r.Client = cli + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + database := &apiv1.Database{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&databaseList.Items[0]), database) + Expect(err).ToNot(HaveOccurred()) + Expect(database.Finalizers).To(BeEquivalentTo([]string{utils.DatabaseFinalizerName})) + Expect(database.Status.Applied).To(BeNil()) + Expect(database.Status.Message).ToNot(ContainSubstring("not reconciled")) + }) + + It("should set publications on the cluster as failed and delete their finalizers", func(ctx SpecContext) { + publicationList := &apiv1.PublicationList{ + Items: []apiv1.Publication{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.PublicationFinalizerName, + }, + Name: "pub-1", + Namespace: "test", + }, + Spec: apiv1.PublicationSpec{ + Name: "pub-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + Status: apiv1.PublicationStatus{ + Applied: ptr.To(true), + Message: "", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.PublicationFinalizerName, + }, + Name: "pub-2", + Namespace: "test", + }, + Spec: apiv1.PublicationSpec{ + Name: "pub-test-2", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList). 
+ WithStatusSubresource(&publicationList.Items[0], &publicationList.Items[1]).Build() + r.Client = cli + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + for _, pub := range publicationList.Items { + publication := &apiv1.Publication{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&pub), publication) + Expect(err).ToNot(HaveOccurred()) + Expect(publication.Finalizers).To(BeZero()) + Expect(publication.Status.Applied).To(HaveValue(BeFalse())) + Expect(publication.Status.Message).To(ContainSubstring("cluster resource has been deleted")) + } + }) + + It("should not delete publication finalizers for publications in another cluster", func(ctx SpecContext) { + publicationList := &apiv1.PublicationList{ + Items: []apiv1.Publication{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.PublicationFinalizerName, + }, + Name: "pub-1", + Namespace: "test", + }, + Spec: apiv1.PublicationSpec{ + Name: "pub-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "another-cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build() + r.Client = cli + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + publication := &apiv1.Publication{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&publicationList.Items[0]), publication) + Expect(err).ToNot(HaveOccurred()) + Expect(publication.Finalizers).To(BeEquivalentTo([]string{utils.PublicationFinalizerName})) + Expect(publication.Status.Applied).To(BeNil()) + Expect(publication.Status.Message).ToNot(ContainSubstring("not reconciled")) + }) + + It("should set subscriptions on the cluster as failed and delete their finalizers ", func(ctx SpecContext) { + subscriptionList := &apiv1.SubscriptionList{ + Items: []apiv1.Subscription{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.SubscriptionFinalizerName, + }, + Name: "sub-1", + Namespace: "test", + }, + Spec: apiv1.SubscriptionSpec{ + Name: "sub-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + Status: apiv1.SubscriptionStatus{ + Applied: ptr.To(true), + Message: "", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.SubscriptionFinalizerName, + }, + Name: "sub-2", + Namespace: "test", + }, + Spec: apiv1.SubscriptionSpec{ + Name: "sub-test-2", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList). 
+ WithStatusSubresource(&subscriptionList.Items[0], &subscriptionList.Items[1]).Build() + r.Client = cli + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + for _, sub := range subscriptionList.Items { + subscription := &apiv1.Subscription{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&sub), subscription) + Expect(err).ToNot(HaveOccurred()) + Expect(subscription.Finalizers).To(BeZero()) + Expect(subscription.Status.Applied).To(HaveValue(BeFalse())) + Expect(subscription.Status.Message).To(ContainSubstring("cluster resource has been deleted")) + } + }) + + It("should not delete subscription finalizers for subscriptions in another cluster", func(ctx SpecContext) { + subscriptionList := &apiv1.SubscriptionList{ + Items: []apiv1.Subscription{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.SubscriptionFinalizerName, + }, + Name: "sub-1", + Namespace: "test", + }, + Spec: apiv1.SubscriptionSpec{ + Name: "sub-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "another-cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build() + r.Client = cli + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + subscription := &apiv1.Subscription{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&subscriptionList.Items[0]), subscription) + Expect(err).ToNot(HaveOccurred()) + Expect(subscription.Finalizers).To(BeEquivalentTo([]string{utils.SubscriptionFinalizerName})) + Expect(subscription.Status.Applied).To(BeNil()) + Expect(subscription.Status.Message).ToNot(ContainSubstring("not reconciled")) + }) +}) + +type testStruct struct{ Val int } + +var _ = Describe("toSliceWithPointers", func() { + It("should return pointers to the original slice elements", func() { + items := []testStruct{{1}, {2}, {3}} + pointers := toSliceWithPointers(items) + Expect(pointers).To(HaveLen(len(items))) + for i := range items { + Expect(pointers[i]).To(BeIdenticalTo(&items[i])) + } + }) +}) diff --git a/internal/controller/plugin_controller.go b/internal/controller/plugin_controller.go index b29f0ca7e8..5b2175b804 100644 --- a/internal/controller/plugin_controller.go +++ b/internal/controller/plugin_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the controller of the CRD @@ -31,9 +34,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -45,14 +49,21 @@ type PluginReconciler struct { Scheme *runtime.Scheme Plugins repository.Interface + + OperatorNamespace string } // NewPluginReconciler creates a new PluginReconciler initializing it -func NewPluginReconciler(mgr manager.Manager, plugins repository.Interface) *PluginReconciler { +func NewPluginReconciler( + mgr manager.Manager, + operatorNamespace string, + plugins repository.Interface, +) *PluginReconciler { return &PluginReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Plugins: plugins, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Plugins: plugins, + OperatorNamespace: operatorNamespace, } } @@ -60,9 +71,9 @@ func NewPluginReconciler(mgr manager.Manager, plugins repository.Interface) *Plu func (r *PluginReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { contextLogger, ctx := log.SetupLogger(ctx) - contextLogger.Debug("Plugin reconciliation loop start") + contextLogger.Trace("Plugin reconciliation loop start") defer func() { - contextLogger.Debug("Plugin reconciliation loop end") + contextLogger.Trace("Plugin reconciliation loop end") }() var service corev1.Service @@ -79,6 +90,11 @@ func (r *PluginReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, fmt.Errorf("cannot get the resource: %w", err) } + if !isPluginService(&service, r.OperatorNamespace) { + contextLogger.Trace("Skipping reconciliation for a non-cnpg-i service") + return ctrl.Result{}, nil + } + // Process label and annotations pluginName := service.Labels[utils.PluginNameLabelName] if len(pluginName) == 0 { @@ -206,25 +222,55 @@ func (r *PluginReconciler) getSecret( return &secret, nil } -// SetupWithManager adds this PluginReconciler to the passed controller manager -func (r *PluginReconciler) SetupWithManager(mgr ctrl.Manager, operatorNamespace string) error { - pluginServicesPredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return isPluginService(e.Object, operatorNamespace) - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return isPluginService(e.Object, operatorNamespace) - }, - GenericFunc: func(e event.GenericEvent) bool { - return isPluginService(e.Object, operatorNamespace) - }, - UpdateFunc: func(e event.UpdateEvent) bool { - return isPluginService(e.ObjectNew, operatorNamespace) - }, +func (r *PluginReconciler) mapSecretToPlugin(ctx context.Context, obj client.Object) []reconcile.Request { + // We only consider the secrets that are installed in the + // operator namespace because plugins need to be deployed + // in the same namespace as the operator. 
+ if obj.GetNamespace() != r.OperatorNamespace { + return nil } + logger := log.FromContext(ctx) + + var services corev1.ServiceList + if err := r.List( + ctx, + &services, + client.HasLabels{utils.PluginNameLabelName}, + client.InNamespace(r.OperatorNamespace), + ); err != nil { + logger.Error( + err, + "Error while listing CNPG-I services in the operator namespace", + ) + return nil + } + + var result []reconcile.Request + for i := range services.Items { + service := &services.Items[i] + if isSecretUsedByPluginService(service, obj.GetName()) { + result = append(result, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(service), + }) + } + } + + return result +} + +// SetupWithManager adds this PluginReconciler to the passed controller manager +func (r *PluginReconciler) SetupWithManager( + mgr ctrl.Manager, + maxConcurrentReconciles int, +) error { return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). For(&corev1.Service{}). - WithEventFilter(pluginServicesPredicate). + Named("plugin"). + Watches( + &corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(r.mapSecretToPlugin), + ). Complete(r) } diff --git a/internal/controller/plugin_predicates.go b/internal/controller/plugin_predicates.go index 7f6e381a0d..dfcf0fb02a 100644 --- a/internal/controller/plugin_predicates.go +++ b/internal/controller/plugin_predicates.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -22,7 +25,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) -var isPluginService = func(object client.Object, operatorNamespace string) bool { +func isPluginService(object client.Object, operatorNamespace string) bool { if object.GetNamespace() != operatorNamespace { // Only consider the services that are in the same // namespace where the operator is installed @@ -50,3 +53,14 @@ var isPluginService = func(object client.Object, operatorNamespace string) bool return true } + +// isSecretUsedByPluginService returns true when the passed service +// uses the secret with the passed name +func isSecretUsedByPluginService(service client.Object, secretName string) bool { + annotations := service.GetAnnotations() + + clientSecretName := annotations[utils.PluginClientSecretAnnotationName] + serverSecretName := annotations[utils.PluginServerSecretAnnotationName] + + return clientSecretName == secretName || serverSecretName == secretName +} diff --git a/internal/controller/plugins.go b/internal/controller/plugins.go index 6924b83005..979fed5739 100644 --- a/internal/controller/plugins.go +++ b/internal/controller/plugins.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -27,7 +30,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // preReconcilePluginHooks ensures we call the pre-reconcile plugin hooks @@ -36,7 +38,7 @@ func preReconcilePluginHooks( cluster *apiv1.Cluster, object client.Object, ) cnpgiClient.ReconcilerHookResult { - pluginClient := getPluginClientFromContext(ctx) + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) return pluginClient.PreReconcile(ctx, cluster, object) } @@ -46,7 +48,7 @@ func postReconcilePluginHooks( cluster *apiv1.Cluster, object client.Object, ) cnpgiClient.ReconcilerHookResult { - pluginClient := getPluginClientFromContext(ctx) + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) return pluginClient.PostReconcile(ctx, cluster, object) } @@ -81,15 +83,8 @@ func setStatusPluginHook( "after", cluster.Status.PluginStatus, ) - return ctrl.Result{RequeueAfter: 5 * time.Second}, cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)) -} - -// setPluginClientInContext records the plugin client in the given context -func setPluginClientInContext(ctx context.Context, client cnpgiClient.Client) context.Context { - return context.WithValue(ctx, utils.PluginClientKey, client) -} - -// getPluginClientFromContext gets the current plugin client from the context -func getPluginClientFromContext(ctx context.Context) cnpgiClient.Client { - return ctx.Value(utils.PluginClientKey).(cnpgiClient.Client) + if err := cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } diff --git a/internal/controller/plugins_test.go b/internal/controller/plugins_test.go index c822faa009..1cdeb8fa0f 100644 --- a/internal/controller/plugins_test.go +++ b/internal/controller/plugins_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_controller.go b/internal/controller/pooler_controller.go index 8be6af3ec6..8f64989d08 100644 --- a/internal/controller/pooler_controller.go +++ b/internal/controller/pooler_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
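The plugin controller changes above replace the event-filter predicate with a Watches clause on Secrets plus a map function, so that rotating a plugin's client or server TLS secret re-enqueues the owning plugin Service. Below is a trimmed-down sketch of that mapping idea under the same controller-runtime APIs; the annotation keys are placeholders for illustration, not the operator's constants.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// mapSecretToServices returns a handler.MapFunc: when a Secret changes, list
// the candidate Services in the given namespace and enqueue a reconcile
// request for each one that references the Secret by name.
func mapSecretToServices(c client.Client, namespace string) handler.MapFunc {
	return func(ctx context.Context, obj client.Object) []reconcile.Request {
		// Ignore secrets outside the namespace we care about, mirroring the
		// operator-namespace guard in mapSecretToPlugin above.
		if obj.GetNamespace() != namespace {
			return nil
		}

		var services corev1.ServiceList
		if err := c.List(ctx, &services, client.InNamespace(namespace)); err != nil {
			return nil
		}

		var reqs []reconcile.Request
		for i := range services.Items {
			svc := &services.Items[i]
			if svc.Annotations["example.io/client-secret"] == obj.GetName() ||
				svc.Annotations["example.io/server-secret"] == obj.GetName() {
				reqs = append(reqs, reconcile.Request{
					NamespacedName: client.ObjectKeyFromObject(svc),
				})
			}
		}
		return reqs
	}
}

The design trade-off is the same one visible in the diff: a predicate can only accept or reject events for the watched type, while a map function can translate an event on one object (a Secret) into reconcile requests for related objects (the plugin Services that use it).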
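Above, plugins.go stops keeping private context helpers keyed on utils.PluginClientKey and delegates to cnpgiClient.GetPluginClientFromContext, while setStatusPluginHook now surfaces the status-patch error instead of discarding it behind an unconditional requeue. A minimal sketch of the context-value pattern those helpers implement, assuming an unexported key type and reducing the plugin client to an empty interface:

package main

import (
	"context"
	"fmt"
)

// Client stands in for the plugin client interface defined in
// internal/cnpi/plugin/client.
type Client interface{}

// contextKey is an unexported key type, the usual way to avoid collisions
// with values stored by other packages; the concrete key in the codebase
// may differ.
type contextKey struct{}

// SetPluginClientInContext records the plugin client in the given context.
func SetPluginClientInContext(ctx context.Context, c Client) context.Context {
	return context.WithValue(ctx, contextKey{}, c)
}

// GetPluginClientFromContext gets the current plugin client from the context.
func GetPluginClientFromContext(ctx context.Context) Client {
	c, _ := ctx.Value(contextKey{}).(Client)
	return c
}

func main() {
	ctx := SetPluginClientInContext(context.Background(), "fake-client")
	fmt.Println(GetPluginClientFromContext(ctx))
}

Compared with the bare type assertion in the removed helper, the comma-ok form in the sketch degrades to the zero value instead of panicking when a caller forgets to seed the context.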
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -35,6 +38,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -126,9 +130,11 @@ func (r *PoolerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } // SetupWithManager setup this controller inside the controller manager -func (r *PoolerReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *PoolerReconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles int) error { return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). For(&apiv1.Pooler{}). + Named("pooler"). Owns(&v1.Deployment{}). Owns(&corev1.Service{}). Owns(&corev1.ServiceAccount{}). @@ -154,7 +160,7 @@ func isOwnedByPoolerKind(obj client.Object) (string, bool) { return "", false } - if owner.APIVersion != apiGVString { + if owner.APIVersion != apiSGVString { return "", false } diff --git a/internal/controller/pooler_controller_test.go b/internal/controller/pooler_controller_test.go index 9e22fb7302..5d6f10be87 100644 --- a/internal/controller/pooler_controller_test.go +++ b/internal/controller/pooler_controller_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_predicates.go b/internal/controller/pooler_predicates.go index 9813ff59b8..ebd14931db 100644 --- a/internal/controller/pooler_predicates.go +++ b/internal/controller/pooler_predicates.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_predicates_test.go b/internal/controller/pooler_predicates_test.go index 5a324fb27d..3ea269e8be 100644 --- a/internal/controller/pooler_predicates_test.go +++ b/internal/controller/pooler_predicates_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_resources.go b/internal/controller/pooler_resources.go index 40997786ed..5fea930362 100644 --- a/internal/controller/pooler_resources.go +++ b/internal/controller/pooler_resources.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_resources_test.go b/internal/controller/pooler_resources_test.go index 14468170e7..988673c389 100644 --- a/internal/controller/pooler_resources_test.go +++ b/internal/controller/pooler_resources_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_status.go b/internal/controller/pooler_status.go index af9f9062fa..a1b06ebca6 100644 --- a/internal/controller/pooler_status.go +++ b/internal/controller/pooler_status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_status_test.go b/internal/controller/pooler_status_test.go index 18891442b2..9bbba948fd 100644 --- a/internal/controller/pooler_status_test.go +++ b/internal/controller/pooler_status_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
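The SetupWithManager rewrites above, for both the plugin and the pooler controllers, thread a maxConcurrentReconciles value into controller-runtime's controller.Options and give each controller an explicit Named identity. A minimal wiring sketch under those same controller-runtime APIs, with a no-op placeholder reconciler and names that are illustrative only:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
)

// noopReconciler is a placeholder for a real reconciler such as
// PluginReconciler or PoolerReconciler.
type noopReconciler struct{}

func (noopReconciler) Reconcile(context.Context, ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil
}

// setupExample mirrors the builder chain used above: MaxConcurrentReconciles
// caps how many Reconcile calls may run in parallel for this controller, and
// Named sets a stable controller name for logs, metrics, and workqueues.
func setupExample(mgr ctrl.Manager, maxConcurrentReconciles int) error {
	return ctrl.NewControllerManagedBy(mgr).
		WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
		For(&corev1.Service{}).
		Named("example").
		Complete(noopReconciler{})
}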
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -20,6 +23,7 @@ import ( "context" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -92,6 +96,39 @@ var _ = Describe("pooler_status unit tests", func() { Expect(pooler.Status.Instances).To(Equal(dep.Status.Replicas)) }) + It("should correctly set pod resources to the bootstrap init container", func() { + cluster := newFakeCNPGCluster(env.client, "test-namespace") + + pooler := &v1.Pooler{ + Spec: v1.PoolerSpec{ + Template: &v1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("256Mi"), + }, + }, + }, + }, + }, + } + + dep, err := pgbouncer.Deployment(pooler, cluster) + Expect(err).ToNot(HaveOccurred()) + // check that the init container has the correct resources + Expect(dep.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + initResources := dep.Spec.Template.Spec.InitContainers[0].Resources + Expect(initResources.Requests).To(HaveKeyWithValue(corev1.ResourceCPU, resource.MustParse("100m"))) + Expect(initResources.Requests).To(HaveKeyWithValue(corev1.ResourceMemory, resource.MustParse("128Mi"))) + Expect(initResources.Limits).To(HaveKeyWithValue(corev1.ResourceCPU, resource.MustParse("200m"))) + Expect(initResources.Limits).To(HaveKeyWithValue(corev1.ResourceMemory, resource.MustParse("256Mi"))) + }) + It("should correctly interact with the api server", func() { ctx := context.Background() namespace := newFakeNamespace(env.client) diff --git a/internal/controller/pooler_update.go b/internal/controller/pooler_update.go index edfa3f1927..cc34254f3e 100644 --- a/internal/controller/pooler_update.go +++ b/internal/controller/pooler_update.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_update_test.go b/internal/controller/pooler_update_test.go index 6f0b599c23..820e7a1753 100644 --- a/internal/controller/pooler_update_test.go +++ b/internal/controller/pooler_update_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -297,7 +300,7 @@ var _ = Describe("unit test of pooler_update reconciliation logic", func() { pooler := newFakePooler(env.client, cluster) res := &poolerManagedResources{Deployment: nil, Cluster: cluster} By("setting the reconcilePodSpec annotation to disabled on the pooler ", func() { - pooler.ObjectMeta.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" + pooler.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" pooler.Spec.Template = &apiv1.PodTemplateSpec{ Spec: corev1.PodSpec{ TerminationGracePeriodSeconds: ptr.To(int64(100)), @@ -344,7 +347,7 @@ var _ = Describe("unit test of pooler_update reconciliation logic", func() { }) By("enable again, making sure pooler change updates the deployment", func() { - delete(pooler.ObjectMeta.Annotations, utils.ReconcilePodSpecAnnotationName) + delete(pooler.Annotations, utils.ReconcilePodSpecAnnotationName) beforeDep := getPoolerDeployment(ctx, env.client, pooler) pooler.Spec.Template.Spec.TerminationGracePeriodSeconds = ptr.To(int64(300)) err := env.poolerReconciler.updateDeployment(ctx, pooler, res) @@ -362,7 +365,6 @@ var _ = Describe("unit test of pooler_update reconciliation logic", func() { var _ = Describe("ensureServiceAccountPullSecret", func() { var ( - ctx context.Context r *PoolerReconciler pooler *apiv1.Pooler conf *configuration.Data @@ -385,8 +387,6 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { } BeforeEach(func() { - ctx = context.TODO() - pullSecret = generateOperatorPullSecret() conf = &configuration.Data{ @@ -417,13 +417,13 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { } }) - It("should create the pull secret", func() { + It("should create the pull secret", func(ctx SpecContext) { name, err := r.ensureServiceAccountPullSecret(ctx, pooler, conf) Expect(err).ToNot(HaveOccurred()) Expect(name).To(Equal(poolerSecretName)) }) - It("should not change the pull secret if it matches", func() { + It("should not change the pull secret if it matches", func(ctx SpecContext) { By("creating the secret before triggering the reconcile") secret := generateOperatorPullSecret() secret.Name = poolerSecretName @@ -450,7 +450,7 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { Expect(remoteSecret).To(BeEquivalentTo(remoteSecret)) }) - It("should reconcile the secret if it doesn't match", func() { + It("should reconcile the secret if it doesn't match", func(ctx SpecContext) { By("creating the secret before triggering the reconcile") secret := generateOperatorPullSecret() secret.Name = poolerSecretName diff --git a/internal/controller/replicas.go b/internal/controller/replicas.go index 399452d797..bad7f515de 100644 --- a/internal/controller/replicas.go +++ b/internal/controller/replicas.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -23,6 +26,7 @@ import ( "time" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -63,7 +67,7 @@ func (r *ClusterReconciler) reconcileTargetPrimaryFromPods( if primary := status.Items[0]; (primary.IsPrimary || (cluster.IsReplica() && primary.IsPodReady)) && primary.Pod.Name == cluster.Status.CurrentPrimary && cluster.Status.TargetPrimary == cluster.Status.CurrentPrimary { - isPrimaryOnUnschedulableNode, err := r.isNodeUnschedulable(ctx, primary.Node) + isPrimaryOnUnschedulableNode, err := r.isNodeUnschedulableOrBeingDrained(ctx, primary.Node) if err != nil { contextLogger.Error(err, "while checking if current primary is on an unschedulable node") // in case of error it's better to proceed with the normal target primary reconciliation @@ -106,6 +110,24 @@ func (r *ClusterReconciler) reconcileTargetPrimaryForNonReplicaCluster( return "", err } + // If the quorum check is active, ensure we don't fail over in unsafe scenarios. + isFailoverQuorumActive, err := cluster.IsFailoverQuorumActive() + if err != nil { + contextLogger.Error(err, "Failed to determine if failover quorum is active") + isFailoverQuorumActive = false + } + + if cluster.Status.TargetPrimary == cluster.Status.CurrentPrimary && + cluster.Spec.PostgresConfiguration.Synchronous != nil && + isFailoverQuorumActive { + if status, err := r.evaluateQuorumCheck(ctx, cluster, status); err != nil { + return "", err + } else if !status { + // Prevent a failover from happening + return "", nil + } + } + // The current primary is not correctly working, and we need to elect a new one // but before doing that we need to wait for all the WAL receivers to be // terminated. To make sure they eventually terminate we signal the old primary @@ -165,14 +187,31 @@ func (r *ClusterReconciler) reconcileTargetPrimaryForNonReplicaCluster( return mostAdvancedInstance.Pod.Name, r.setPrimaryInstance(ctx, cluster, mostAdvancedInstance.Pod.Name) } -// isNodeUnschedulable checks whether a node is set to unschedulable -func (r *ClusterReconciler) isNodeUnschedulable(ctx context.Context, nodeName string) (bool, error) { +// isNodeUnschedulableOrBeingDrained checks whether a node is unschedulable or is currently being drained. +// nolint: lll +// Copied from https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/7bacf2d36f397bd098b3388403e8759c480be7e5/cmd/hooks/prestop.go#L91 +func isNodeUnschedulableOrBeingDrained(node *corev1.Node, drainTaints []string) bool { + for _, taint := range node.Spec.Taints { + if slices.Contains(drainTaints, taint.Key) { + return true + } + } + + return node.Spec.Unschedulable +} + +// isNodeUnschedulableOrBeingDrained checks whether a node is set to unschedulable or is being drained +func (r *ClusterReconciler) isNodeUnschedulableOrBeingDrained( + ctx context.Context, + nodeName string, +) (bool, error) { var node corev1.Node err := r.Get(ctx, client.ObjectKey{Name: nodeName}, &node) if err != nil { return false, err } - return node.Spec.Unschedulable, nil + + return isNodeUnschedulableOrBeingDrained(&node, r.drainTaints), nil } // Pick the next primary on a schedulable node, if the current is running on an unschedulable one, @@ -186,7 +225,7 @@ func (r *ClusterReconciler) setPrimaryOnSchedulableNode( contextLogger := log.FromContext(ctx) // Checking failed pods, e.g.
pending pods due to missing PVCs - _, hasFailedPods := cluster.Status.InstancesStatus[utils.PodFailed] + _, hasFailedPods := cluster.Status.InstancesStatus[apiv1.PodFailed] // Checking whether there are pods on other nodes podsOnOtherNodes := GetPodsNotOnPrimaryNode(status, primaryPod) @@ -216,7 +255,7 @@ func (r *ClusterReconciler) setPrimaryOnSchedulableNode( // Start looking for the next primary among the pods for _, candidate := range podsOnOtherNodes.Items { // If candidate on an unschedulable node too, skip it - if unschedulable, _ := r.isNodeUnschedulable(ctx, candidate.Node); unschedulable { + if status, _ := r.isNodeUnschedulableOrBeingDrained(ctx, candidate.Node); status { continue } @@ -309,7 +348,10 @@ func GetPodsNotOnPrimaryNode( status postgres.PostgresqlStatusList, primaryPod *postgres.PostgresqlStatus, ) postgres.PostgresqlStatusList { - podsOnOtherNodes := postgres.PostgresqlStatusList{} + podsOnOtherNodes := postgres.PostgresqlStatusList{ + IsReplicaCluster: status.IsReplicaCluster, + CurrentPrimary: status.CurrentPrimary, + } if primaryPod == nil { return podsOnOtherNodes } @@ -350,13 +392,13 @@ func (r *ClusterReconciler) evaluateFailoverDelay( } if cluster.Status.CurrentPrimaryFailingSinceTimestamp == "" { - cluster.Status.CurrentPrimaryFailingSinceTimestamp = utils.GetCurrentTimestamp() + cluster.Status.CurrentPrimaryFailingSinceTimestamp = pgTime.GetCurrentTimestamp() if err := r.Status().Update(ctx, cluster); err != nil { return err } } - primaryFailingSince, err := utils.DifferenceBetweenTimestamps( - utils.GetCurrentTimestamp(), + primaryFailingSince, err := pgTime.DifferenceBetweenTimestamps( + pgTime.GetCurrentTimestamp(), cluster.Status.CurrentPrimaryFailingSinceTimestamp, ) if err != nil { diff --git a/internal/controller/replicas_quorum.go b/internal/controller/replicas_quorum.go new file mode 100644 index 0000000000..0794365578 --- /dev/null +++ b/internal/controller/replicas_quorum.go @@ -0,0 +1,185 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" +) + +// evaluateQuorumCheck evaluates the quorum check algorithm to detect if a failover +// is possible without losing any transaction. +// "true" is returned when there is surely a replica containing all the transactions, +// "false" is returned otherwise. +// When an error is raised, the caller should not start a failover.
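For reference, the safety criterion implemented by the function below is the classic quorum-intersection inequality R + W > N: with N synchronous standby names, W standbys that PostgreSQL waits for at commit time, and R promotable replicas within that set, the read and write sets must overlap in at least one fully up-to-date replica. A minimal, self-contained sketch of that check follows; the helper name and the plain-map representation are illustrative only, not part of this patch.

// quorumSafe reports whether a failover can proceed without losing
// synchronously committed transactions, under the R + W > N criterion.
// nodeSet:    synchronous_standby_names (cardinality N)
// syncNum:    number of standbys PostgreSQL waits for (W)
// promotable: replicas that are ready to be promoted
func quorumSafe(nodeSet []string, syncNum int, promotable map[string]bool) bool {
	if syncNum <= 0 || len(nodeSet) == 0 {
		// Incoherent synchronous replication metadata: deny the failover
		return false
	}
	readSet := 0 // R: promotable replicas within nodeSet
	for _, name := range nodeSet {
		if promotable[name] {
			readSet++
		}
	}
	return readSet+syncNum > len(nodeSet)
}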
+func (r *ClusterReconciler) evaluateQuorumCheck( + ctx context.Context, + cluster *apiv1.Cluster, + statusList postgres.PostgresqlStatusList, +) (bool, error) { + contextLogger := log.FromContext(ctx).WithValues("tag", "quorumCheck") + + var failoverQuorum apiv1.FailoverQuorum + if err := r.Get(ctx, client.ObjectKeyFromObject(cluster), &failoverQuorum); err != nil { + if apierrs.IsNotFound(err) { + contextLogger.Warning( + "Quorum check failed because no synchronous metadata is available. Denying the failover request") + return false, nil + } + + contextLogger.Error(err, + "Quorum check failed because the synchronous replica metadata couldn't be read") + return false, err + } + + return r.evaluateQuorumCheckWithStatus(ctx, &failoverQuorum, statusList) +} + +// evaluateQuorumCheckWithStatus is used internally by evaluateQuorumCheck, +// primarily for the benefit of the unit tests +func (r *ClusterReconciler) evaluateQuorumCheckWithStatus( + ctx context.Context, + failoverQuorum *apiv1.FailoverQuorum, + statusList postgres.PostgresqlStatusList, +) (bool, error) { + contextLogger := log.FromContext(ctx).WithValues("tag", "quorumCheck") + + syncStatus := failoverQuorum.Status + contextLogger.Trace("Dumping latest synchronous replication status", "syncStatus", syncStatus) + + // Step 1: coherence check of the synchronous replication information + if syncStatus.StandbyNumber <= 0 { + contextLogger.Warning( + "Quorum check failed because of an unsupported number of synchronous nodes") + return false, nil + } + + if len(syncStatus.StandbyNames) == 0 { + contextLogger.Warning( + "Quorum check failed because the list of synchronous replicas is empty") + return false, nil + } + + // Step 2: detect promotable replicas + candidateReplicas := stringset.New() + for _, record := range statusList.Items { + if record.Error == nil && record.IsPodReady { + candidateReplicas.Put(record.Pod.Name) + } + } + + // Step 3: evaluate quorum check algorithm + // + // Important: R + W > N <==> strong consistency + // With: + // N = the cardinality of the synchronous_standby_names set + // W = the sync number or 0 if we're changing a replica configuration. + // R = the cardinality of the set of promotable replicas within the + // synchronous_standby_names set + // + // When this criterion is satisfied, we surely have a node containing + // the latest transaction. + // + // The case of W == 0 has already been sorted out in the coherence check. + + nodeSet := stringset.From(syncStatus.StandbyNames) + writeSetCardinality := syncStatus.StandbyNumber + readSet := nodeSet.Intersect(candidateReplicas) + + nodeSetCardinality := nodeSet.Len() + readSetCardinality := readSet.Len() + + isStronglyConsistent := (readSetCardinality + writeSetCardinality) > nodeSetCardinality + + contextLogger.Info( + "Quorum check algorithm results", + "isStronglyConsistent", isStronglyConsistent, + "readSetCardinality", readSetCardinality, + "readSet", readSet.ToSortedList(), + "writeSetCardinality", writeSetCardinality, + "nodeSet", nodeSet.ToSortedList(), + "nodeSetCardinality", nodeSetCardinality, + ) + + if !isStronglyConsistent { + contextLogger.Info("Strong consistency check failed.
Preventing failover.") + } + + return isStronglyConsistent, nil +} + +func (r *ClusterReconciler) reconcileFailoverQuorumObject(ctx context.Context, cluster *apiv1.Cluster) error { + contextLogger := log.FromContext(ctx).WithValues("tag", "quorumCheck") + + syncConfig := cluster.Spec.PostgresConfiguration.Synchronous + failoverQuorumActive, err := cluster.IsFailoverQuorumActive() + if err != nil { + contextLogger.Error(err, "Failed to determine if failover quorum is active") + } + if syncConfig != nil && failoverQuorumActive { + return r.ensureFailoverQuorumObjectExists(ctx, cluster) + } + + return r.ensureFailoverQuorumObjectDoesNotExist(ctx, cluster) +} + +func (r *ClusterReconciler) ensureFailoverQuorumObjectExists(ctx context.Context, cluster *apiv1.Cluster) error { + failoverQuorum := apiv1.FailoverQuorum{ + TypeMeta: metav1.TypeMeta{ + Kind: "FailoverQuorum", + APIVersion: apiv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name, + }, + } + cluster.SetInheritedDataAndOwnership(&failoverQuorum.ObjectMeta) + + err := r.Create(ctx, &failoverQuorum) + if err != nil && !apierrs.IsAlreadyExists(err) { + log.FromContext(ctx).Error(err, "Unable to create the FailoverQuorum", "object", failoverQuorum) + return err + } + + return nil +} + +func (r *ClusterReconciler) ensureFailoverQuorumObjectDoesNotExist(ctx context.Context, cluster *apiv1.Cluster) error { + var failoverQuorum apiv1.FailoverQuorum + + if err := r.Get(ctx, client.ObjectKeyFromObject(cluster), &failoverQuorum); err != nil { + if apierrs.IsNotFound(err) { + return nil + } + + return err + } + + return r.Delete(ctx, &failoverQuorum) +} diff --git a/internal/controller/replicas_quorum_test.go b/internal/controller/replicas_quorum_test.go new file mode 100644 index 0000000000..b2bda01ff0 --- /dev/null +++ b/internal/controller/replicas_quorum_test.go @@ -0,0 +1,141 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("quorum promotion control", func() { + r := &ClusterReconciler{} + + When("the information is not consistent because the number of synchronous standbys is zero", func() { + sync := &apiv1.FailoverQuorum{ + Status: apiv1.FailoverQuorumStatus{ + StandbyNumber: 0, + }, + } + + statusList := postgres.PostgresqlStatusList{} + + It("denies a failover", func(ctx SpecContext) { + status, err := r.evaluateQuorumCheckWithStatus(ctx, sync, statusList) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeFalse()) + }) + }) + + When("the information is not consistent because the standby list is empty", func() { + sync := &apiv1.FailoverQuorum{ + Status: apiv1.FailoverQuorumStatus{ + StandbyNumber: 3, + StandbyNames: nil, + }, + } + + statusList := postgres.PostgresqlStatusList{} + + It("denies a failover", func(ctx SpecContext) { + status, err := r.evaluateQuorumCheckWithStatus(ctx, sync, statusList) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeFalse()) + }) + }) + + When("there is no quorum", func() { + sync := &apiv1.FailoverQuorum{ + Status: apiv1.FailoverQuorumStatus{ + StandbyNumber: 1, + StandbyNames: []string{ + "postgres-2", + "postgres-3", + }, + }, + } + + statusList := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-3", + }, + }, + Error: nil, + IsPodReady: true, + }, + }, + } + + It("denies a failover", func(ctx SpecContext) { + status, err := r.evaluateQuorumCheckWithStatus(ctx, sync, statusList) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeFalse()) + }) + }) + + When("there is quorum", func() { + sync := &apiv1.FailoverQuorum{ + Status: apiv1.FailoverQuorumStatus{ + StandbyNumber: 1, + StandbyNames: []string{ + "postgres-2", + "postgres-3", + }, + }, + } + + statusList := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-2", + }, + }, + Error: nil, + IsPodReady: true, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-3", + }, + }, + Error: nil, + IsPodReady: true, + }, + }, + } + + It("allows a failover", func(ctx SpecContext) { + status, err := r.evaluateQuorumCheckWithStatus(ctx, sync, statusList) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + }) + }) +}) diff --git a/internal/controller/replicas_test.go b/internal/controller/replicas_test.go index fcba496d90..634a9dafa3 100644 --- a/internal/controller/replicas_test.go +++ b/internal/controller/replicas_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/rollout/doc.go b/internal/controller/rollout/doc.go new file mode 100644 index 0000000000..04c1c9afd9 --- /dev/null +++ b/internal/controller/rollout/doc.go @@ -0,0 +1,23 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package rollout contains the rollout manager, allowing +// CloudNativePG to spread Pod rollouts depending on +// the passed configuration +package rollout diff --git a/internal/controller/rollout/rollout.go b/internal/controller/rollout/rollout.go new file mode 100644 index 0000000000..ff97ab7d9c --- /dev/null +++ b/internal/controller/rollout/rollout.go @@ -0,0 +1,113 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package rollout + +import ( + "sync" + "time" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// The type of functions returning a moment in time +type timeFunc func() time.Time + +// Manager is the rollout manager. It is safe to use +// concurrently +type Manager struct { + m sync.Mutex + + // The amount of time we wait between rollouts of + // different clusters + clusterRolloutDelay time.Duration + + // The amount of time we wait between instances of + // the same cluster + instanceRolloutDelay time.Duration + + // This is used to get the current time.
Mainly + // used by the unit tests to inject a fake time + timeProvider timeFunc + + // The following data is relative to the last + // rollout + lastInstance string + lastCluster client.ObjectKey + lastUpdate time.Time +} + +// Result is the output of the rollout manager, telling the +// operator how much time we need to wait to roll out a Pod +type Result struct { + // This is true when the Pod can be rolled out immediately + RolloutAllowed bool + + // This is set with the amount of time the operator needs + // to wait to roll out that Pod + TimeToWait time.Duration +} + +// New creates a new rollout manager with the passed configuration +func New(clusterRolloutDelay, instancesRolloutDelay time.Duration) *Manager { + return &Manager{ + timeProvider: time.Now, + clusterRolloutDelay: clusterRolloutDelay, + instanceRolloutDelay: instancesRolloutDelay, + } +} + +// CoordinateRollout is called to check whether this rollout is allowed or not +// by the manager +func (manager *Manager) CoordinateRollout( + cluster client.ObjectKey, + instanceName string, +) Result { + manager.m.Lock() + defer manager.m.Unlock() + + if manager.lastCluster == cluster { + return manager.coordinateRolloutWithTime(cluster, instanceName, manager.instanceRolloutDelay) + } + return manager.coordinateRolloutWithTime(cluster, instanceName, manager.clusterRolloutDelay) +} + +func (manager *Manager) coordinateRolloutWithTime( + cluster client.ObjectKey, + instanceName string, + t time.Duration, +) Result { + now := manager.timeProvider() + timeSinceLastRollout := now.Sub(manager.lastUpdate) + + if manager.lastUpdate.IsZero() || timeSinceLastRollout >= t { + manager.lastCluster = cluster + manager.lastInstance = instanceName + manager.lastUpdate = now + return Result{ + RolloutAllowed: true, + TimeToWait: 0, + } + } + + return Result{ + RolloutAllowed: false, + TimeToWait: t - timeSinceLastRollout, + } +} diff --git a/internal/controller/rollout/rollout_test.go b/internal/controller/rollout/rollout_test.go new file mode 100644 index 0000000000..6d4e6d0d6c --- /dev/null +++ b/internal/controller/rollout/rollout_test.go @@ -0,0 +1,167 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package rollout + +import ( + "time" + + "sigs.k8s.io/controller-runtime/pkg/client" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Rollout manager", func() { + It("should coordinate rollouts when delays are set", func() { + startTime := time.Now() + currentTime := startTime + + const ( + clustersRolloutDelay = 10 * time.Minute + instancesRolloutDelay = 5 * time.Minute + ) + + m := New(clustersRolloutDelay, instancesRolloutDelay) + m.timeProvider = func() time.Time { + return currentTime + } + + By("allowing the first rollout immediately", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-example", + }, "cluster-example-1") + + Expect(result.RolloutAllowed).To(BeTrue()) + Expect(result.TimeToWait).To(BeZero()) + }) + + By("waiting for one minute", func() { + currentTime = currentTime.Add(1 * time.Minute) + }) + + By("checking that a rollout of an instance is not allowed", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-example", + }, "cluster-example-2") + + Expect(result.RolloutAllowed).To(BeFalse()) + Expect(result.TimeToWait).To(Equal(4 * time.Minute)) + Expect(m.lastUpdate).To(Equal(startTime)) + }) + + By("checking that a rollout of a cluster is not allowed", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-bis", + }, "cluster-bis-1") + + Expect(result.RolloutAllowed).To(BeFalse()) + Expect(result.TimeToWait).To(Equal(9 * time.Minute)) + Expect(m.lastUpdate).To(Equal(startTime)) + }) + + By("waiting for five minutes", func() { + currentTime = currentTime.Add(5 * time.Minute) + }) + + By("checking that a rollout of a cluster is still not allowed", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-bis", + }, "cluster-bis-1") + + Expect(result.RolloutAllowed).To(BeFalse()) + Expect(result.TimeToWait).To(Equal(4 * time.Minute)) + Expect(m.lastUpdate).To(Equal(startTime)) + }) + + By("checking that a rollout of an instance is allowed", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-example", + }, "cluster-example-2") + + Expect(result.RolloutAllowed).To(BeTrue()) + Expect(result.TimeToWait).To(BeZero()) + Expect(m.lastUpdate).To(Equal(currentTime)) + }) + + By("waiting for another eleven minutes", func() { + currentTime = currentTime.Add(11 * time.Minute) + }) + + By("checking that a rollout of a cluster is allowed", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-bis", + }, "cluster-bis-1") + + Expect(result.RolloutAllowed).To(BeTrue()) + Expect(result.TimeToWait).To(BeZero()) + Expect(m.lastUpdate).To(Equal(currentTime)) + }) + }) + + It("should allow all rollouts when delays are not set", func() { + m := New(0, 0) + + By("allowing the first rollout immediately", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-example", + }, "cluster-example-1") + + Expect(result.RolloutAllowed).To(BeTrue()) + Expect(result.TimeToWait).To(BeZero()) + }) + + By("allowing a rollout of an instance", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-example", + }, "cluster-example-2") + + Expect(result.RolloutAllowed).To(BeTrue()) + Expect(result.TimeToWait).To(BeZero()) + }) + + By("allowing a rollout of a cluster", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-bis", + }, "cluster-bis-1") + + Expect(result.RolloutAllowed).To(BeTrue())
+ Expect(result.TimeToWait).To(BeZero()) + }) + + By("allowing a rollout of another instance", func() { + result := m.CoordinateRollout(client.ObjectKey{ + Namespace: "default", + Name: "cluster-example", + }, "cluster-example-3") + + Expect(result.RolloutAllowed).To(BeTrue()) + Expect(result.TimeToWait).To(BeZero()) + }) + }) +}) diff --git a/internal/controller/rollout/suite_test.go b/internal/controller/rollout/suite_test.go new file mode 100644 index 0000000000..82c0fa602e --- /dev/null +++ b/internal/controller/rollout/suite_test.go @@ -0,0 +1,33 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package rollout + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestRollout(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Rollout manager suite") +} diff --git a/internal/controller/scheduledbackup_controller.go b/internal/controller/scheduledbackup_controller.go index 45c71ad5a8..913b1fa00d 100644 --- a/internal/controller/scheduledbackup_controller.go +++ b/internal/controller/scheduledbackup_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -24,6 +27,7 @@ import ( "time" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "github.com/robfig/cron" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +36,7 @@ import ( "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -238,7 +243,7 @@ func createBackup( // So we have no backup running, let's create a backup.
// Let's have deterministic names to avoid creating the job two // times - name := fmt.Sprintf("%s-%s", scheduledBackup.GetName(), utils.ToCompactISO8601(backupTime)) + name := fmt.Sprintf("%s-%s", scheduledBackup.GetName(), pgTime.ToCompactISO8601(backupTime)) backup := scheduledBackup.CreateBackup(name) metadata := &backup.ObjectMeta if metadata.Labels == nil { @@ -325,7 +330,11 @@ func (r *ScheduledBackupReconciler) GetChildBackups( } // SetupWithManager install this controller in the controller manager -func (r *ScheduledBackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { +func (r *ScheduledBackupReconciler) SetupWithManager( + ctx context.Context, + mgr ctrl.Manager, + maxConcurrentReconciles int, +) error { // Create a new indexed field on backups. This field will be used to easily // find all the backups created by this controller if err := mgr.GetFieldIndexer().IndexField( @@ -342,7 +351,7 @@ func (r *ScheduledBackupReconciler) SetupWithManager(ctx context.Context, mgr ct return nil } - if owner.APIVersion != apiGVString { + if owner.APIVersion != apiSGVString { return nil } @@ -352,6 +361,8 @@ func (r *ScheduledBackupReconciler) SetupWithManager(ctx context.Context, mgr ct } return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). For(&apiv1.ScheduledBackup{}). + Named("scheduled-backup"). Complete(r) } diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index ae8f8e8638..d567ba4dc1 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -74,6 +77,7 @@ func buildTestEnvironment() *testingEnvironment { k8sClient := fake.NewClientBuilder().WithScheme(scheme). WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Backup{}, &apiv1.Pooler{}, &corev1.Service{}, &corev1.ConfigMap{}, &corev1.Secret{}). + WithIndex(&batchv1.Job{}, jobOwnerKey, jobOwnerIndexFunc). 
Build() Expect(err).ToNot(HaveOccurred()) @@ -151,7 +155,7 @@ func newFakePooler(k8sClient client.Client, cluster *apiv1.Cluster) *apiv1.Poole // upstream issue, go client cleans typemeta: https://github.com/kubernetes/client-go/issues/308 pooler.TypeMeta = metav1.TypeMeta{ Kind: apiv1.PoolerKind, - APIVersion: apiv1.GroupVersion.String(), + APIVersion: apiv1.SchemeGroupVersion.String(), } return pooler @@ -219,7 +223,7 @@ func newFakeCNPGCluster( // upstream issue, go client cleans typemeta: https://github.com/kubernetes/client-go/issues/308 cluster.TypeMeta = metav1.TypeMeta{ Kind: apiv1.ClusterKind, - APIVersion: apiv1.GroupVersion.String(), + APIVersion: apiv1.SchemeGroupVersion.String(), } return cluster @@ -270,7 +274,7 @@ func newFakeCNPGClusterWithPGWal(k8sClient client.Client, namespace string) *api // upstream issue, go client cleans typemeta: https://github.com/kubernetes/client-go/issues/308 cluster.TypeMeta = metav1.TypeMeta{ Kind: apiv1.ClusterKind, - APIVersion: apiv1.GroupVersion.String(), + APIVersion: apiv1.SchemeGroupVersion.String(), } return cluster @@ -313,7 +317,7 @@ func generateFakeClusterPods( var pods []corev1.Pod for idx < cluster.Spec.Instances { idx++ - pod := specs.PodWithExistingStorage(*cluster, idx) + pod, _ := specs.NewInstance(context.TODO(), *cluster, idx, true) cluster.SetInheritedDataAndOwnership(&pod.ObjectMeta) err := c.Create(context.Background(), pod) diff --git a/internal/management/cache/cache.go b/internal/management/cache/cache.go index ad1b559e8c..3860cbd17e 100644 --- a/internal/management/cache/cache.go +++ b/internal/management/cache/cache.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,25 +13,16 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -// Package cache contains the constants and functions for reading/writing to the process local cache -// some specific supported objects package cache import ( "sync" ) -const ( - // ClusterKey is the key to be used to access the cached cluster - ClusterKey = "cluster" - // WALArchiveKey is the key to be used to access the cached envs for wal-archive - WALArchiveKey = "wal-archive" - // WALRestoreKey is the key to be used to access the cached envs for wal-restore - WALRestoreKey = "wal-restore" -) - var cache sync.Map // Store write an object into the local cache diff --git a/pkg/utils/strings.go b/internal/management/cache/doc.go similarity index 64% rename from pkg/utils/strings.go rename to internal/management/cache/doc.go index 2ee8b0d1f4..b187a141c3 100644 --- a/pkg/utils/strings.go +++ b/internal/management/cache/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,16 +13,10 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/ -package utils +SPDX-License-Identifier: Apache-2.0 +*/ -// StringInSlice looks for a search string inside the string slice -func StringInSlice(slice []string, search string) bool { - for _, s := range slice { - if s == search { - return true - } - } - return false -} +// Package cache contains the constants and functions for reading/writing some +// specific supported objects to the process-local cache +package cache diff --git a/internal/management/cache/error.go b/internal/management/cache/error.go index f26af13e43..5747e9e4fd 100644 --- a/internal/management/cache/error.go +++ b/internal/management/cache/error.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package cache diff --git a/internal/management/cache/keys.go b/internal/management/cache/keys.go new file mode 100644 index 0000000000..d7259a06de --- /dev/null +++ b/internal/management/cache/keys.go @@ -0,0 +1,29 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package cache + +const ( + // ClusterKey is the key to be used to access the cached cluster + ClusterKey = "cluster" + // WALArchiveKey is the key to be used to access the cached envs for wal-archive + WALArchiveKey = "wal-archive" + // WALRestoreKey is the key to be used to access the cached envs for wal-restore + WALRestoreKey = "wal-restore" +) diff --git a/internal/management/controller/cache.go b/internal/management/controller/cache.go index bc031de24d..4f6bfaf6ce 100644 --- a/internal/management/controller/cache.go +++ b/internal/management/controller/cache.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -47,13 +50,15 @@ func (r *InstanceReconciler) updateCacheFromCluster(ctx context.Context, cluster } func (r *InstanceReconciler) updateWALRestoreSettingsCache(ctx context.Context, cluster *apiv1.Cluster) { - _, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.PodName) + contextLogger := log.FromContext(ctx) + + _, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.GetPodName()) if errors.Is(err, walrestore.ErrNoBackupConfigured) { cache.Delete(cache.WALRestoreKey) return } if err != nil { - log.Error(err, "while getting recover configuration") + contextLogger.Error(err, "while getting recover configuration") return } env = append(env, os.Environ()...) @@ -66,7 +71,7 @@ func (r *InstanceReconciler) updateWALRestoreSettingsCache(ctx context.Context, env, ) if err != nil { - log.Error(err, "while getting recover credentials") + contextLogger.Error(err, "while getting recover credentials") } cache.Store(cache.WALRestoreKey, envRestore) } @@ -79,6 +84,8 @@ func (r *InstanceReconciler) shouldUpdateWALArchiveSettingsCache( ctx context.Context, cluster *apiv1.Cluster, ) (shouldRetry bool) { + contextLogger := log.FromContext(ctx) + if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { cache.Delete(cache.WALArchiveKey) return false @@ -92,12 +99,12 @@ func (r *InstanceReconciler) shouldUpdateWALArchiveSettingsCache( cluster.Spec.Backup.BarmanObjectStore, os.Environ()) if apierrors.IsForbidden(err) { - log.Info("backup credentials don't yet have access permissions. Will retry reconciliation loop") + contextLogger.Info("backup credentials don't yet have access permissions. Will retry reconciliation loop") return true } if err != nil { - log.Error(err, "while getting backup credentials") + contextLogger.Error(err, "while getting backup credentials") return false } diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go new file mode 100644 index 0000000000..51a3d30c8a --- /dev/null +++ b/internal/management/controller/common.go @@ -0,0 +1,175 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "maps" + "slices" + "time" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/jackc/pgx/v5" + "github.com/lib/pq" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// errClusterIsReplica is raised when an object +// cannot be reconciled because it belongs to a replica cluster +var errClusterIsReplica = fmt.Errorf("waiting for the cluster to become primary") + +type instanceInterface interface { + GetSuperUserDB() (*sql.DB, error) + GetClusterName() string + GetPodName() string + GetNamespaceName() string +} + +type markableAsFailed interface { + client.Object + SetAsFailed(err error) +} + +// markAsFailed marks the reconciliation as failed and records the corresponding error +func markAsFailed( + ctx context.Context, + cli client.Client, + resource markableAsFailed, + err error, +) error { + resource.SetAsFailed(err) + return cli.Status().Update(ctx, resource) +} + +type markableAsUnknown interface { + client.Object + SetAsUnknown(err error) +} + +// markAsUnknown marks the reconciliation as unknown and records the corresponding error +func markAsUnknown( + ctx context.Context, + cli client.Client, + resource markableAsUnknown, + err error, +) error { + resource.SetAsUnknown(err) + return cli.Status().Update(ctx, resource) +} + +type markableAsReady interface { + client.Object + SetAsReady() +} + +// markAsReady marks the reconciliation as succeeded inside the resource +func markAsReady( + ctx context.Context, + cli client.Client, + resource markableAsReady, +) error { + resource.SetAsReady() + return cli.Status().Update(ctx, resource) +} + +func getClusterFromInstance( + ctx context.Context, + cli client.Client, + instance instanceInterface, +) (*apiv1.Cluster, error) { + var cluster apiv1.Cluster + err := cli.Get(ctx, types.NamespacedName{ + Name: instance.GetClusterName(), + Namespace: instance.GetNamespaceName(), + }, &cluster) + return &cluster, err +} + +func toPostgresParameters(parameters map[string]string) string { + if len(parameters) == 0 { + return "" + } + + b := new(bytes.Buffer) + for _, key := range slices.Sorted(maps.Keys(parameters)) { + // TODO(armru): any alternative to pg.QuoteLiteral?
+ _, _ = fmt.Fprintf(b, "%s = %s, ", pgx.Identifier{key}.Sanitize(), pq.QuoteLiteral(parameters[key])) + } + + // pruning last 2 chars `, ` + return b.String()[:len(b.String())-2] +} + +type postgresResourceManager interface { + client.Object + HasReconciliations() bool + markableAsFailed +} + +type managedResourceExclusivityEnsurer[T postgresResourceManager] interface { + MustHaveManagedResourceExclusivity(newManager T) error + client.ObjectList +} + +func detectConflictingManagers[T postgresResourceManager, TL managedResourceExclusivityEnsurer[T]]( + ctx context.Context, + cli client.Client, + resource T, + list TL, +) (ctrl.Result, error) { + if resource.HasReconciliations() { + return ctrl.Result{}, nil + } + contextLogger := log.FromContext(ctx) + + if err := cli.List(ctx, list, + client.InNamespace(resource.GetNamespace()), + ); err != nil { + kind := list.GetObjectKind().GroupVersionKind().Kind + + contextLogger.Error(err, "while getting list", + "kind", kind, + "namespace", resource.GetNamespace(), + ) + return ctrl.Result{}, fmt.Errorf("impossible to list %s objects in namespace %s: %w", + kind, resource.GetNamespace(), err) + } + + // Make sure the target PG element is not being managed by another kubernetes resource + if conflictErr := list.MustHaveManagedResourceExclusivity(resource); conflictErr != nil { + if markErr := markAsFailed(ctx, cli, resource, conflictErr); markErr != nil { + return ctrl.Result{}, + fmt.Errorf("encountered an error while marking as failed the resource: %w, original error: %w", + markErr, + conflictErr, + ) + } + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + return ctrl.Result{}, nil +} diff --git a/pkg/stringset/suite_test.go b/internal/management/controller/common_test.go similarity index 57% rename from pkg/stringset/suite_test.go rename to internal/management/controller/common_test.go index bb29e64601..4beb734e2c 100644 --- a/pkg/stringset/suite_test.go +++ b/internal/management/controller/common_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,22 +13,23 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package stringset +package controller import ( - "testing" - . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
- -func TestConfigFile(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecs(t, "Configuration File Parsing Suite") -} +var _ = Describe("Conversion of PG parameters from map to string of key/value pairs", func() { + It("returns expected well-formed list", func() { + m := map[string]string{ + "a": "1", "b": "2", + } + res := toPostgresParameters(m) + Expect(res).To(Equal(`"a" = '1', "b" = '2'`)) + }) +}) diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index 3ffb963530..7857506d4d 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -19,22 +22,17 @@ package controller import ( "context" "database/sql" - "errors" "fmt" "time" "github.com/cloudnative-pg/machinery/pkg/log" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -43,56 +41,69 @@ type DatabaseReconciler struct { client.Client Scheme *runtime.Scheme - instance instanceInterface + instance instanceInterface + finalizerReconciler *finalizerReconciler[*apiv1.Database] + + getSuperUserDB func() (*sql.DB, error) + getTargetDB func(dbname string) (*sql.DB, error) +} + +// ErrFailedDatabaseObjectReconciliation is raised when a database object failed to reconcile +var ErrFailedDatabaseObjectReconciliation = fmt.Errorf("database object reconciliation failed") + +// schemaObjectManager is the manager of schema objects +var schemaObjectManager = databaseObjectManager[apiv1.SchemaSpec, schemaInfo]{ + get: getDatabaseSchemaInfo, + create: createDatabaseSchema, + update: updateDatabaseSchema, + drop: dropDatabaseSchema, } -type instanceInterface interface { - GetSuperUserDB() (*sql.DB, error) - GetClusterName() string - GetPodName() string - GetNamespaceName() string +// extensionObjectManager is the manager of the extension objects +var extensionObjectManager = databaseObjectManager[apiv1.ExtensionSpec, extInfo]{ + get: getDatabaseExtensionInfo, + create: createDatabaseExtension, + update: updateDatabaseExtension, + drop: dropDatabaseExtension, } -// errClusterIsReplica is raised when the database object -// cannot be reconciled because it belongs to a replica cluster -var errClusterIsReplica = fmt.Errorf("waiting for the cluster to become primary") +// fdwObjectManager is the manager of the fdw objects +var fdwObjectManager = databaseObjectManager[apiv1.FDWSpec, fdwInfo]{ + get: getDatabaseFDWInfo, 
+ create: createDatabaseFDW, + update: updateDatabaseFDW, + drop: dropDatabaseFDW, +} // databaseReconciliationInterval is the time between the // database reconciliation loop failures const databaseReconciliationInterval = 30 * time.Second -// databaseFinalizerName is the name of the finalizer -// triggering the deletion of the database -const databaseFinalizerName = utils.MetadataNamespace + "/deleteDatabase" - // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=databases,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=databases/status,verbs=get;update;patch // Reconcile is the database reconciliation loop func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - contextLogger, ctx := log.SetupLogger(ctx) - - contextLogger.Debug("Reconciliation loop start") - defer func() { - contextLogger.Debug("Reconciliation loop end") - }() + contextLogger := log.FromContext(ctx). + WithName("database_reconciler"). + WithValues("databaseName", req.Name) // Get the database object var database apiv1.Database - if err := r.Client.Get(ctx, client.ObjectKey{ + if err := r.Get(ctx, client.ObjectKey{ Namespace: req.Namespace, Name: req.Name, }, &database); err != nil { - // This is a deleted object, there's nothing - // to do since we don't manage any finalizers. - if apierrors.IsNotFound(err) { - return ctrl.Result{}, nil - } - return ctrl.Result{}, err + contextLogger.Trace("Could not fetch Database", "error", err) + return ctrl.Result{}, client.IgnoreNotFound(err) } // This is not for me! if database.Spec.ClusterRef.Name != r.instance.GetClusterName() { + contextLogger.Trace("Database is not for this cluster", + "cluster", database.Spec.ClusterRef.Name, + "expected", r.instance.GetClusterName(), + ) return ctrl.Result{}, nil } @@ -104,122 +115,74 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c // Fetch the Cluster from the cache cluster, err := r.GetCluster(ctx) if err != nil { - if apierrors.IsNotFound(err) { - // The cluster has been deleted. 
- // We just need to wait for this instance manager to be terminated - contextLogger.Debug("Could not find Cluster") - return ctrl.Result{}, nil - } - - return ctrl.Result{}, fmt.Errorf("could not fetch Cluster: %w", err) + return ctrl.Result{}, markAsFailed(ctx, r.Client, &database, fmt.Errorf("while fetching the cluster: %w", err)) } - // This is not for me, at least now - if cluster.Status.CurrentPrimary != r.instance.GetPodName() { - return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil - } + contextLogger.Info("Reconciling database") + defer func() { + contextLogger.Info("Reconciliation loop of database exited") + }() // Still not for me, we're waiting for a switchover if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } - // Cannot do anything on a replica cluster - if cluster.IsReplica() { - return r.failedReconciliation( - ctx, - &database, - errClusterIsReplica, - ) + // This is not for me, at least now + if cluster.Status.CurrentPrimary != r.instance.GetPodName() { + return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } - // Add the finalizer if we don't have it - // nolint:nestif - if database.DeletionTimestamp.IsZero() { - if controllerutil.AddFinalizer(&database, databaseFinalizerName) { - if err := r.Update(ctx, &database); err != nil { - return ctrl.Result{}, err - } - } - } else { - // This database is being deleted - if controllerutil.ContainsFinalizer(&database, databaseFinalizerName) { - if database.Spec.ReclaimPolicy == apiv1.DatabaseReclaimDelete { - if err := r.deleteDatabase(ctx, &database); err != nil { - return ctrl.Result{}, err - } - } - - // remove our finalizer from the list and update it. - controllerutil.RemoveFinalizer(&database, databaseFinalizerName) - if err := r.Update(ctx, &database); err != nil { - return ctrl.Result{}, err - } + // Cannot do anything on a replica cluster + if cluster.IsReplica() { + if err := markAsUnknown(ctx, r.Client, &database, errClusterIsReplica); err != nil { + return ctrl.Result{}, err } + return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil + } + if err := r.finalizerReconciler.reconcile(ctx, &database); err != nil { + return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err) + } + if !database.GetDeletionTimestamp().IsZero() { return ctrl.Result{}, nil } - if err := r.reconcileDatabase( - ctx, - &database, - ); err != nil { - return r.failedReconciliation( - ctx, - &database, - err, - ) + if res, err := detectConflictingManagers(ctx, r.Client, &database, &apiv1.DatabaseList{}); err != nil || + !res.IsZero() { + return res, err } - return r.succeededReconciliation( - ctx, - &database, - ) -} - -// failedReconciliation marks the reconciliation as failed and logs the corresponding error -func (r *DatabaseReconciler) failedReconciliation( - ctx context.Context, - database *apiv1.Database, - err error, -) (ctrl.Result, error) { - oldDatabase := database.DeepCopy() - database.Status.Error = err.Error() - database.Status.Ready = false - - var statusError *instance.StatusError - if errors.As(err, &statusError) { - // The body line of the instance manager contains the human - // readable error - database.Status.Error = statusError.Body + if err := r.reconcileDatabaseResource(ctx, &database); err != nil { + if markErr := markAsFailed(ctx, r.Client, &database, err); markErr != nil { + contextLogger.Error(err, "while marking as failed the database resource", + "error", err, + 
"markError", markErr, + ) + return ctrl.Result{}, fmt.Errorf( + "encountered an error while marking as failed the database resource: %w, original error: %w", + markErr, + err) + } + return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } - if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil { + if err := markAsReady(ctx, r.Client, &database); err != nil { return ctrl.Result{}, err } - - return ctrl.Result{ - RequeueAfter: databaseReconciliationInterval, - }, nil + return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } -// succeededReconciliation marks the reconciliation as succeeded -func (r *DatabaseReconciler) succeededReconciliation( - ctx context.Context, - database *apiv1.Database, -) (ctrl.Result, error) { - oldDatabase := database.DeepCopy() - database.Status.Error = "" - database.Status.Ready = true - database.Status.ObservedGeneration = database.Generation - - if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil { - return ctrl.Result{}, err +func (r *DatabaseReconciler) evaluateDropDatabase(ctx context.Context, db *apiv1.Database) error { + if db.Spec.ReclaimPolicy != apiv1.DatabaseReclaimDelete { + return nil + } + sqlDB, err := r.getSuperUserDB() + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) } - return ctrl.Result{ - RequeueAfter: databaseReconciliationInterval, - }, nil + return dropDatabase(ctx, sqlDB, db) } // NewDatabaseReconciler creates a new database reconciler @@ -227,41 +190,102 @@ func NewDatabaseReconciler( mgr manager.Manager, instance *postgres.Instance, ) *DatabaseReconciler { - return &DatabaseReconciler{ + dr := &DatabaseReconciler{ Client: mgr.GetClient(), instance: instance, + getSuperUserDB: func() (*sql.DB, error) { + return instance.GetSuperUserDB() + }, + getTargetDB: func(dbname string) (*sql.DB, error) { + return instance.ConnectionPool().Connection(dbname) + }, } + + dr.finalizerReconciler = newFinalizerReconciler( + mgr.GetClient(), + utils.DatabaseFinalizerName, + dr.evaluateDropDatabase, + ) + + return dr } // SetupWithManager sets up the controller with the Manager. func (r *DatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&apiv1.Database{}). + Named("instance-database"). 
Complete(r) } // GetCluster gets the managed cluster through the client func (r *DatabaseReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) { - var cluster apiv1.Cluster - err := r.Client.Get(ctx, - types.NamespacedName{ - Namespace: r.instance.GetNamespaceName(), - Name: r.instance.GetClusterName(), - }, - &cluster) + return getClusterFromInstance(ctx, r.Client, r.instance) +} + +func (r *DatabaseReconciler) reconcileDatabaseResource(ctx context.Context, obj *apiv1.Database) error { + db, err := r.getSuperUserDB() if err != nil { - return nil, err + return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err) + } + + if obj.Spec.Ensure == apiv1.EnsureAbsent { + return dropDatabase(ctx, db, obj) + } + + if err := r.reconcilePostgresDatabase(ctx, db, obj); err != nil { + return err + } + + if err := r.reconcileDatabaseObjects(ctx, obj); err != nil { + return err } - return &cluster, nil + for _, status := range obj.Status.Schemas { + if !status.Applied { + return ErrFailedDatabaseObjectReconciliation + } + } + for _, status := range obj.Status.Extensions { + if !status.Applied { + return ErrFailedDatabaseObjectReconciliation + } + } + for _, status := range obj.Status.FDWs { + if !status.Applied { + return ErrFailedDatabaseObjectReconciliation + } + } + + return nil } -func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.Database) error { - db, err := r.instance.GetSuperUserDB() +func (r *DatabaseReconciler) reconcileDatabaseObjects( + ctx context.Context, + obj *apiv1.Database, +) error { + objectCount := 0 + objectCount += len(obj.Spec.Schemas) + objectCount += len(obj.Spec.Extensions) + objectCount += len(obj.Spec.FDWs) + + if objectCount == 0 { + return nil + } + + db, err := r.getTargetDB(obj.Spec.Name) if err != nil { - return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err) + return fmt.Errorf("while connecting to the database %q: %v", obj.Spec.Name, err) } + obj.Status.Schemas = schemaObjectManager.reconcileList(ctx, db, obj.Spec.Schemas) + obj.Status.Extensions = extensionObjectManager.reconcileList(ctx, db, obj.Spec.Extensions) + obj.Status.FDWs = fdwObjectManager.reconcileList(ctx, db, obj.Spec.FDWs) + + return nil +} + +func (r *DatabaseReconciler) reconcilePostgresDatabase(ctx context.Context, db *sql.DB, obj *apiv1.Database) error { dbExists, err := detectDatabase(ctx, db, obj) if err != nil { return fmt.Errorf("while detecting the database %q: %w", obj.Spec.Name, err) @@ -273,12 +297,3 @@ func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.D return createDatabase(ctx, db, obj) } - -func (r *DatabaseReconciler) deleteDatabase(ctx context.Context, obj *apiv1.Database) error { - db, err := r.instance.GetSuperUserDB() - if err != nil { - return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err) - } - - return dropDatabase(ctx, db, obj) -} diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index b8875e8a73..83289800ea 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -19,13 +22,36 @@ package controller import ( "context" "database/sql" + "errors" "fmt" + "strings" + "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/pgx/v5" + "github.com/lib/pq" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" ) +type extInfo struct { + Name string `json:"name"` + Version string `json:"version"` + Schema string `json:"schema"` +} + +type schemaInfo struct { + Name string `json:"name"` + Owner string `json:"owner"` +} + +type fdwInfo struct { + Name string `json:"name"` + Handler string `json:"handler"` + Validator string `json:"validator"` + Owner string `json:"owner"` + Options map[string]apiv1.OptionSpecValue `json:"options"` +} + func detectDatabase( ctx context.Context, db *sql.DB, @@ -35,8 +61,8 @@ func detectDatabase( ctx, ` SELECT count(*) - FROM pg_database - WHERE datname = $1 + FROM pg_catalog.pg_database + WHERE datname = $1 `, obj.Spec.Name) if row.Err() != nil { @@ -56,26 +82,69 @@ func createDatabase( db *sql.DB, obj *apiv1.Database, ) error { - sqlCreateDatabase := fmt.Sprintf("CREATE DATABASE %s ", pgx.Identifier{obj.Spec.Name}.Sanitize()) + contextLogger := log.FromContext(ctx) + + var sqlCreateDatabase strings.Builder + sqlCreateDatabase.WriteString(fmt.Sprintf("CREATE DATABASE %s ", pgx.Identifier{obj.Spec.Name}.Sanitize())) if len(obj.Spec.Owner) > 0 { - sqlCreateDatabase += fmt.Sprintf(" OWNER %s", pgx.Identifier{obj.Spec.Owner}.Sanitize()) + sqlCreateDatabase.WriteString(fmt.Sprintf(" OWNER %s", pgx.Identifier{obj.Spec.Owner}.Sanitize())) + } + if len(obj.Spec.Template) > 0 { + sqlCreateDatabase.WriteString(fmt.Sprintf(" TEMPLATE %s", pgx.Identifier{obj.Spec.Template}.Sanitize())) } if len(obj.Spec.Tablespace) > 0 { - sqlCreateDatabase += fmt.Sprintf(" TABLESPACE %s", pgx.Identifier{obj.Spec.Tablespace}.Sanitize()) + sqlCreateDatabase.WriteString(fmt.Sprintf(" TABLESPACE %s", pgx.Identifier{obj.Spec.Tablespace}.Sanitize())) } if obj.Spec.AllowConnections != nil { - sqlCreateDatabase += fmt.Sprintf(" ALLOW_CONNECTIONS %v", *obj.Spec.AllowConnections) + sqlCreateDatabase.WriteString(fmt.Sprintf(" ALLOW_CONNECTIONS %v", *obj.Spec.AllowConnections)) } if obj.Spec.ConnectionLimit != nil { - sqlCreateDatabase += fmt.Sprintf(" CONNECTION LIMIT %v", *obj.Spec.ConnectionLimit) + sqlCreateDatabase.WriteString(fmt.Sprintf(" CONNECTION LIMIT %v", *obj.Spec.ConnectionLimit)) } if obj.Spec.IsTemplate != nil { - sqlCreateDatabase += fmt.Sprintf(" IS_TEMPLATE %v", *obj.Spec.IsTemplate) + sqlCreateDatabase.WriteString(fmt.Sprintf(" IS_TEMPLATE %v", *obj.Spec.IsTemplate)) + } + if obj.Spec.Encoding != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" ENCODING %s", pgx.Identifier{obj.Spec.Encoding}.Sanitize())) + } + if obj.Spec.Locale != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE %s", pgx.Identifier{obj.Spec.Locale}.Sanitize())) + } + if obj.Spec.LocaleProvider != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE_PROVIDER %s", + pgx.Identifier{obj.Spec.LocaleProvider}.Sanitize())) + } + if obj.Spec.LcCollate != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" LC_COLLATE %s", pgx.Identifier{obj.Spec.LcCollate}.Sanitize())) + } + if obj.Spec.LcCtype != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" 
LC_CTYPE %s", pgx.Identifier{obj.Spec.LcCtype}.Sanitize())) + } + if obj.Spec.IcuLocale != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_LOCALE %s", pgx.Identifier{obj.Spec.IcuLocale}.Sanitize())) + } + if obj.Spec.IcuRules != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize())) + } + if obj.Spec.BuiltinLocale != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" BUILTIN_LOCALE %s", + pgx.Identifier{obj.Spec.BuiltinLocale}.Sanitize())) + } + if obj.Spec.CollationVersion != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" COLLATION_VERSION %s", + pgx.Identifier{obj.Spec.CollationVersion}.Sanitize())) } - _, err := db.ExecContext(ctx, sqlCreateDatabase) + _, err := db.ExecContext(ctx, sqlCreateDatabase.String()) + if err != nil { + contextLogger.Error(err, "while creating database", "query", sqlCreateDatabase.String()) + } - return err + if err != nil { + return fmt.Errorf("while creating database %q: %w", + obj.Spec.Name, err) + } + return nil } func updateDatabase( @@ -83,6 +152,8 @@ func updateDatabase( db *sql.DB, obj *apiv1.Database, ) error { + contextLogger := log.FromContext(ctx) + if obj.Spec.AllowConnections != nil { changeAllowConnectionsSQL := fmt.Sprintf( "ALTER DATABASE %s WITH ALLOW_CONNECTIONS %v", @@ -90,6 +161,7 @@ func updateDatabase( *obj.Spec.AllowConnections) if _, err := db.ExecContext(ctx, changeAllowConnectionsSQL); err != nil { + contextLogger.Error(err, "while altering database", "query", changeAllowConnectionsSQL) return fmt.Errorf("while altering database %q with allow_connections %t: %w", obj.Spec.Name, *obj.Spec.AllowConnections, err) } @@ -102,6 +174,7 @@ func updateDatabase( *obj.Spec.ConnectionLimit) if _, err := db.ExecContext(ctx, changeConnectionsLimitSQL); err != nil { + contextLogger.Error(err, "while altering database", "query", changeConnectionsLimitSQL) return fmt.Errorf("while altering database %q with connection limit %d: %w", obj.Spec.Name, *obj.Spec.ConnectionLimit, err) } @@ -114,6 +187,7 @@ func updateDatabase( *obj.Spec.IsTemplate) if _, err := db.ExecContext(ctx, changeIsTemplateSQL); err != nil { + contextLogger.Error(err, "while altering database", "query", changeIsTemplateSQL) return fmt.Errorf("while altering database %q with is_template %t: %w", obj.Spec.Name, *obj.Spec.IsTemplate, err) } @@ -126,7 +200,8 @@ func updateDatabase( pgx.Identifier{obj.Spec.Owner}.Sanitize()) if _, err := db.ExecContext(ctx, changeOwnerSQL); err != nil { - return fmt.Errorf("while altering database %q owner %s to: %w", + contextLogger.Error(err, "while altering database", "query", changeOwnerSQL) + return fmt.Errorf("while altering database %q owner to %s: %w", obj.Spec.Name, obj.Spec.Owner, err) } } @@ -138,7 +213,8 @@ func updateDatabase( pgx.Identifier{obj.Spec.Tablespace}.Sanitize()) if _, err := db.ExecContext(ctx, changeTablespaceSQL); err != nil { - return fmt.Errorf("while altering database %q tablespace %s: %w", + contextLogger.Error(err, "while altering database", "query", changeTablespaceSQL) + return fmt.Errorf("while altering database %q tablespace to %s: %w", obj.Spec.Name, obj.Spec.Tablespace, err) } } @@ -151,13 +227,478 @@ func dropDatabase( db *sql.DB, obj *apiv1.Database, ) error { + contextLogger := log.FromContext(ctx) + query := fmt.Sprintf("DROP DATABASE IF EXISTS %s", pgx.Identifier{obj.Spec.Name}.Sanitize()) _, err := db.ExecContext( ctx, - fmt.Sprintf("DROP DATABASE IF EXISTS %s", pgx.Identifier{obj.Spec.Name}.Sanitize()), - ) + query) if err != nil { + 
contextLogger.Error(err, "while dropping database", "query", query) return fmt.Errorf("while dropping database %q: %w", obj.Spec.Name, err) } return nil } + +const detectDatabaseExtensionSQL = ` +SELECT e.extname, e.extversion, n.nspname +FROM pg_catalog.pg_extension e +JOIN pg_catalog.pg_namespace n ON e.extnamespace=n.oid +WHERE e.extname = $1 +` + +func getDatabaseExtensionInfo(ctx context.Context, db *sql.DB, ext apiv1.ExtensionSpec) (*extInfo, error) { + row := db.QueryRowContext( + ctx, detectDatabaseExtensionSQL, + ext.Name) + if row.Err() != nil { + return nil, fmt.Errorf("while checking if extension %q exists: %w", ext.Name, row.Err()) + } + + var result extInfo + if err := row.Scan(&result.Name, &result.Version, &result.Schema); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, fmt.Errorf("while scanning if extension %q exists: %w", ext.Name, err) + } + + return &result, nil +} + +func createDatabaseExtension(ctx context.Context, db *sql.DB, ext apiv1.ExtensionSpec) error { + contextLogger := log.FromContext(ctx) + + var sqlCreateExtension strings.Builder + sqlCreateExtension.WriteString(fmt.Sprintf("CREATE EXTENSION %s ", pgx.Identifier{ext.Name}.Sanitize())) + if len(ext.Version) > 0 { + sqlCreateExtension.WriteString(fmt.Sprintf(" VERSION %s", pgx.Identifier{ext.Version}.Sanitize())) + } + if len(ext.Schema) > 0 { + sqlCreateExtension.WriteString(fmt.Sprintf(" SCHEMA %s", pgx.Identifier{ext.Schema}.Sanitize())) + } + + _, err := db.ExecContext(ctx, sqlCreateExtension.String()) + if err != nil { + contextLogger.Error(err, "while creating extension", "query", sqlCreateExtension.String()) + return err + } + contextLogger.Info("created extension", "name", ext.Name) + + return nil +} + +func dropDatabaseExtension(ctx context.Context, db *sql.DB, ext apiv1.ExtensionSpec) error { + contextLogger := log.FromContext(ctx) + query := fmt.Sprintf("DROP EXTENSION IF EXISTS %s", pgx.Identifier{ext.Name}.Sanitize()) + _, err := db.ExecContext( + ctx, + query) + if err != nil { + contextLogger.Error(err, "while dropping extension", "query", query) + return err + } + contextLogger.Info("dropped extension", "name", ext.Name) + return nil +} + +func updateDatabaseExtension(ctx context.Context, db *sql.DB, spec apiv1.ExtensionSpec, info *extInfo) error { + contextLogger := log.FromContext(ctx) + if len(spec.Schema) > 0 && spec.Schema != info.Schema { + changeSchemaSQL := fmt.Sprintf( + "ALTER EXTENSION %s SET SCHEMA %v", + pgx.Identifier{spec.Name}.Sanitize(), + pgx.Identifier{spec.Schema}.Sanitize(), + ) + + if _, err := db.ExecContext(ctx, changeSchemaSQL); err != nil { + return fmt.Errorf("altering schema: %w", err) + } + + contextLogger.Info("altered extension schema", "name", spec.Name, "schema", spec.Schema) + } + + if len(spec.Version) > 0 && spec.Version != info.Version { + //nolint:gosec + changeVersionSQL := fmt.Sprintf( + "ALTER EXTENSION %s UPDATE TO %v", + pgx.Identifier{spec.Name}.Sanitize(), + pgx.Identifier{spec.Version}.Sanitize(), + ) + + if _, err := db.ExecContext(ctx, changeVersionSQL); err != nil { + return fmt.Errorf("altering version: %w", err) + } + + contextLogger.Info("altered extension version", "name", spec.Name, "version", spec.Version) + } + + return nil +} + +const detectDatabaseSchemaSQL = ` +SELECT n.nspname, a.rolname +FROM pg_catalog.pg_namespace n +JOIN pg_catalog.pg_authid a ON n.nspowner = a.oid +WHERE n.nspname = $1 +` + +func getDatabaseSchemaInfo(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec) (*schemaInfo, 
error) {
+	row := db.QueryRowContext(
+		ctx, detectDatabaseSchemaSQL,
+		schema.Name)
+	if row.Err() != nil {
+		return nil, fmt.Errorf("while checking if schema %q exists: %w", schema.Name, row.Err())
+	}
+
+	var result schemaInfo
+	if err := row.Scan(&result.Name, &result.Owner); err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return nil, nil
+		}
+		return nil, fmt.Errorf("while scanning if schema %q exists: %w", schema.Name, err)
+	}
+
+	return &result, nil
+}
+
+func createDatabaseSchema(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec) error {
+	contextLogger := log.FromContext(ctx)
+
+	var sqlCreateSchema strings.Builder
+	sqlCreateSchema.WriteString(fmt.Sprintf("CREATE SCHEMA %s ", pgx.Identifier{schema.Name}.Sanitize()))
+	if len(schema.Owner) > 0 {
+		sqlCreateSchema.WriteString(fmt.Sprintf(" AUTHORIZATION %s", pgx.Identifier{schema.Owner}.Sanitize()))
+	}
+
+	_, err := db.ExecContext(ctx, sqlCreateSchema.String())
+	if err != nil {
+		contextLogger.Error(err, "while creating schema", "query", sqlCreateSchema.String())
+		return err
+	}
+	contextLogger.Info("created schema", "name", schema.Name)
+
+	return nil
+}
+
+func updateDatabaseSchema(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec, info *schemaInfo) error {
+	contextLogger := log.FromContext(ctx)
+	if len(schema.Owner) > 0 && schema.Owner != info.Owner {
+		changeSchemaSQL := fmt.Sprintf(
+			"ALTER SCHEMA %s OWNER TO %v",
+			pgx.Identifier{schema.Name}.Sanitize(),
+			pgx.Identifier{schema.Owner}.Sanitize(),
+		)
+
+		if _, err := db.ExecContext(ctx, changeSchemaSQL); err != nil {
+			return fmt.Errorf("altering schema: %w", err)
+		}
+
+		contextLogger.Info("altered schema owner", "name", schema.Name, "owner", schema.Owner)
+	}
+
+	return nil
+}
+
+func dropDatabaseSchema(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec) error {
+	contextLogger := log.FromContext(ctx)
+	query := fmt.Sprintf("DROP SCHEMA IF EXISTS %s", pgx.Identifier{schema.Name}.Sanitize())
+	_, err := db.ExecContext(
+		ctx,
+		query)
+	if err != nil {
+		contextLogger.Error(err, "while dropping schema", "query", query)
+		return err
+	}
+	contextLogger.Info("dropped schema", "name", schema.Name)
+	return nil
+}
+
+const detectDatabaseFDWSQL = `
+SELECT
+	fdwname, fdwhandler::regproc::text, fdwvalidator::regproc::text, fdwoptions,
+	a.rolname AS owner
+FROM pg_catalog.pg_foreign_data_wrapper f
+JOIN pg_catalog.pg_authid a ON f.fdwowner = a.oid
+WHERE fdwname = $1
+`
+
+func getDatabaseFDWInfo(ctx context.Context, db *sql.DB, fdw apiv1.FDWSpec) (*fdwInfo, error) {
+	contextLogger := log.FromContext(ctx)
+
+	row := db.QueryRowContext(
+		ctx, detectDatabaseFDWSQL,
+		fdw.Name)
+	if row.Err() != nil {
+		return nil, fmt.Errorf("while checking if FDW %q exists: %w", fdw.Name, row.Err())
+	}
+
+	var (
+		result     fdwInfo
+		optionsRaw pq.StringArray
+	)
+
+	if err := row.Scan(&result.Name, &result.Handler, &result.Validator, &optionsRaw, &result.Owner); err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return nil, nil
+		}
+		return nil, fmt.Errorf("while scanning if FDW %q exists: %w", fdw.Name, err)
+	}
+
+	// Extract options from SQL raw format (e.g.
-{host=localhost,port=5432}) to type OptSpec + opts := make(map[string]apiv1.OptionSpecValue, len(optionsRaw)) + for _, opt := range optionsRaw { + parts := strings.SplitN(opt, "=", 2) + if len(parts) == 2 { + opts[parts[0]] = apiv1.OptionSpecValue{ + Value: parts[1], + } + } else { + contextLogger.Info( + "skipping unparsable option, expected \"keyword=value\"", + "optionsRaw", optionsRaw, + "fdwName", fdw.Name) + } + } + result.Options = opts + + return &result, nil +} + +// updateDatabaseFDWUsage updates the usage permissions for a foreign data wrapper +// based on the provided FDW specification. +// It supports granting or revoking usage permissions for specified users. +func updateDatabaseFDWUsage(ctx context.Context, db *sql.DB, fdw *apiv1.FDWSpec) error { + contextLogger := log.FromContext(ctx) + + for _, usageSpec := range fdw.Usages { + switch usageSpec.Type { + case "grant": + changeUsageSQL := fmt.Sprintf( + "GRANT USAGE ON FOREIGN DATA WRAPPER %s TO %s", + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{usageSpec.Name}.Sanitize()) + if _, err := db.ExecContext(ctx, changeUsageSQL); err != nil { + return fmt.Errorf("granting usage of foreign data wrapper %w", err) + } + contextLogger.Info("granted usage of foreign data wrapper", "name", fdw.Name, "user", usageSpec.Name) + + case "revoke": + changeUsageSQL := fmt.Sprintf( + "REVOKE USAGE ON FOREIGN DATA WRAPPER %s FROM %s", // #nosec G201 + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{usageSpec.Name}.Sanitize()) + if _, err := db.ExecContext(ctx, changeUsageSQL); err != nil { + return fmt.Errorf("revoking usage of foreign data wrapper %w", err) + } + contextLogger.Info("revoked usage of foreign data wrapper", "name", fdw.Name, "user", usageSpec.Name) + + default: + contextLogger.Warning( + "unknown usage type", + "type", usageSpec.Type, "fdwName", fdw.Name) + } + } + + return nil +} + +func createDatabaseFDW(ctx context.Context, db *sql.DB, fdw apiv1.FDWSpec) error { + contextLogger := log.FromContext(ctx) + + var sqlCreateFDW strings.Builder + sqlCreateFDW.WriteString(fmt.Sprintf("CREATE FOREIGN DATA WRAPPER %s ", pgx.Identifier{fdw.Name}.Sanitize())) + + // Create Handler + if len(fdw.Handler) > 0 { + switch fdw.Handler { + case "-": + sqlCreateFDW.WriteString("NO HANDLER ") + default: + sqlCreateFDW.WriteString(fmt.Sprintf("HANDLER %s ", pgx.Identifier{fdw.Handler}.Sanitize())) + } + } + + // Create Validator + if len(fdw.Validator) > 0 { + switch fdw.Validator { + case "-": + sqlCreateFDW.WriteString("NO VALIDATOR ") + default: + sqlCreateFDW.WriteString(fmt.Sprintf("VALIDATOR %s ", pgx.Identifier{fdw.Validator}.Sanitize())) + } + } + + // Extract options + opts := make([]string, 0, len(fdw.Options)) + for _, optionSpec := range fdw.Options { + if optionSpec.Ensure == apiv1.EnsureAbsent { + continue + } + opts = append(opts, fmt.Sprintf("%s '%s'", pgx.Identifier{optionSpec.Name}.Sanitize(), + optionSpec.Value)) + } + if len(opts) > 0 { + sqlCreateFDW.WriteString("OPTIONS (" + strings.Join(opts, ", ") + ")") + } + + _, err := db.ExecContext(ctx, sqlCreateFDW.String()) + if err != nil { + contextLogger.Error(err, "while creating foreign data wrapper", "query", sqlCreateFDW.String()) + return err + } + contextLogger.Info("created foreign data wrapper", "name", fdw.Name) + + // Update usage permissions + if len(fdw.Usages) > 0 { + if err := updateDatabaseFDWUsage(ctx, db, &fdw); err != nil { + return err + } + } + + return nil +} + +func updateFDWOptions(ctx context.Context, db *sql.DB, fdw *apiv1.FDWSpec, info 
*fdwInfo) error { + contextLogger := log.FromContext(ctx) + + // Collect individual ALTER-clauses + var clauses []string + for _, desiredOptSpec := range fdw.Options { + curOptSpec, exists := info.Options[desiredOptSpec.Name] + + switch { + case desiredOptSpec.Ensure == apiv1.EnsurePresent && !exists: + clauses = append(clauses, fmt.Sprintf("ADD %s '%s'", + pgx.Identifier{desiredOptSpec.Name}.Sanitize(), desiredOptSpec.Value)) + + case desiredOptSpec.Ensure == apiv1.EnsurePresent && exists: + if desiredOptSpec.Value != curOptSpec.Value { + clauses = append(clauses, fmt.Sprintf("SET %s '%s'", + pgx.Identifier{desiredOptSpec.Name}.Sanitize(), desiredOptSpec.Value)) + } + + case desiredOptSpec.Ensure == apiv1.EnsureAbsent && exists: + clauses = append(clauses, fmt.Sprintf("DROP %s", pgx.Identifier{desiredOptSpec.Name}.Sanitize())) + } + } + + if len(clauses) == 0 { + return nil + } + + // Build SQL + changeOptionSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s OPTIONS (%s)", pgx.Identifier{fdw.Name}.Sanitize(), + strings.Join(clauses, ", "), + ) + + if _, err := db.ExecContext(ctx, changeOptionSQL); err != nil { + return fmt.Errorf("altering options of foreign data wrapper %w", err) + } + contextLogger.Info("altered foreign data wrapper options", "name", fdw.Name, "options", fdw.Options) + + return nil +} + +func updateDatabaseFDW(ctx context.Context, db *sql.DB, fdw apiv1.FDWSpec, info *fdwInfo) error { + contextLogger := log.FromContext(ctx) + + // Alter Handler + if len(fdw.Handler) > 0 && fdw.Handler != info.Handler { + switch fdw.Handler { + case "-": + changeHandlerSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s NO HANDLER", + pgx.Identifier{fdw.Name}.Sanitize(), + ) + if _, err := db.ExecContext(ctx, changeHandlerSQL); err != nil { + return fmt.Errorf("removing handler of foreign data wrapper %w", err) + } + contextLogger.Info("removed foreign data wrapper handler", "name", fdw.Name) + + default: + changeHandlerSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s HANDLER %s", + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{fdw.Handler}.Sanitize(), + ) + if _, err := db.ExecContext(ctx, changeHandlerSQL); err != nil { + return fmt.Errorf("altering handler of foreign data wrapper %w", err) + } + contextLogger.Info("altered foreign data wrapper handler", "name", fdw.Name, "handler", fdw.Handler) + } + } + + // Alter Validator + if len(fdw.Validator) > 0 && fdw.Validator != info.Validator { + switch fdw.Validator { + case "-": + changeValidatorSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s NO VALIDATOR", + pgx.Identifier{fdw.Name}.Sanitize(), + ) + + if _, err := db.ExecContext(ctx, changeValidatorSQL); err != nil { + return fmt.Errorf("removing validator of foreign data wrapper %w", err) + } + + contextLogger.Info("removed foreign data wrapper validator", "name", fdw.Name) + + default: + changeValidatorSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s VALIDATOR %s", + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{fdw.Validator}.Sanitize(), + ) + if _, err := db.ExecContext(ctx, changeValidatorSQL); err != nil { + return fmt.Errorf("altering validator of foreign data wrapper %w", err) + } + + contextLogger.Info("altered foreign data wrapper validator", "name", fdw.Name, "validator", fdw.Validator) + } + } + + // Alter the owner + if len(fdw.Owner) > 0 && fdw.Owner != info.Owner { + changeOwnerSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s OWNER TO %v", + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{fdw.Owner}.Sanitize(), + ) + + if _, err := 
db.ExecContext(ctx, changeOwnerSQL); err != nil { + return fmt.Errorf("altering owner of foreign data wrapper %w", err) + } + + contextLogger.Info("altered foreign data wrapper owner", "name", fdw.Name, "owner", fdw.Owner) + } + + // Alter Options + if err := updateFDWOptions(ctx, db, &fdw, info); err != nil { + return err + } + + // Update usage permissions + if len(fdw.Usages) > 0 { + if err := updateDatabaseFDWUsage(ctx, db, &fdw); err != nil { + return err + } + } + + return nil +} + +func dropDatabaseFDW(ctx context.Context, db *sql.DB, fdw apiv1.FDWSpec) error { + contextLogger := log.FromContext(ctx) + query := fmt.Sprintf("DROP FOREIGN DATA WRAPPER IF EXISTS %s", pgx.Identifier{fdw.Name}.Sanitize()) + _, err := db.ExecContext( + ctx, + query) + if err != nil { + contextLogger.Error(err, "while dropping foreign data wrapper", "query", query) + return err + } + contextLogger.Info("dropped foreign data wrapper", "name", fdw.Name) + return nil +} diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go index 3cfdf123a7..f520039f55 100644 --- a/internal/management/controller/database_controller_sql_test.go +++ b/internal/management/controller/database_controller_sql_test.go @@ -1,18 +1,22 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ + package controller import ( @@ -65,7 +69,7 @@ var _ = Describe("Managed Database SQL", func() { It("returns true when it detects an existing Database", func(ctx SpecContext) { expectedValue := sqlmock.NewRows([]string{""}).AddRow("1") dbMock.ExpectQuery(`SELECT count(*) - FROM pg_database + FROM pg_catalog.pg_database WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) dbExists, err := detectDatabase(ctx, db, database) @@ -76,7 +80,7 @@ var _ = Describe("Managed Database SQL", func() { It("returns false when a Database is missing", func(ctx SpecContext) { expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") dbMock.ExpectQuery(`SELECT count(*) - FROM pg_database + FROM pg_catalog.pg_database WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) dbExists, err := detectDatabase(ctx, db, database) @@ -88,17 +92,65 @@ var _ = Describe("Managed Database SQL", func() { Context("createDatabase", func() { It("should create a new Database", func(ctx SpecContext) { database.Spec.IsTemplate = ptr.To(true) + database.Spec.Template = "myTemplate" database.Spec.Tablespace = "myTablespace" database.Spec.AllowConnections = ptr.To(true) database.Spec.ConnectionLimit = ptr.To(-1) expectedValue := sqlmock.NewResult(0, 1) expectedQuery := fmt.Sprintf( - "CREATE DATABASE %s OWNER %s TABLESPACE %s "+ + "CREATE DATABASE %s OWNER %s TEMPLATE %s TABLESPACE %s "+ "ALLOW_CONNECTIONS %t CONNECTION LIMIT %d IS_TEMPLATE %t", pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), - pgx.Identifier{database.Spec.Tablespace}.Sanitize(), *database.Spec.AllowConnections, - *database.Spec.ConnectionLimit, *database.Spec.IsTemplate, + pgx.Identifier{database.Spec.Template}.Sanitize(), pgx.Identifier{database.Spec.Tablespace}.Sanitize(), + *database.Spec.AllowConnections, *database.Spec.ConnectionLimit, *database.Spec.IsTemplate, + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) + + err = createDatabase(ctx, db, database) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should create a new Database with locale and encoding kind fields", func(ctx SpecContext) { + database.Spec.Locale = "POSIX" + database.Spec.LocaleProvider = "icu" + database.Spec.LcCtype = "en_US.utf8" + database.Spec.LcCollate = "C" + database.Spec.Encoding = "LATIN1" + database.Spec.IcuLocale = "en" + database.Spec.IcuRules = "fr" + + expectedValue := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s "+ + "ENCODING %s LOCALE %s LOCALE_PROVIDER %s LC_COLLATE %s LC_CTYPE %s "+ + "ICU_LOCALE %s ICU_RULES %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), + pgx.Identifier{database.Spec.Encoding}.Sanitize(), pgx.Identifier{database.Spec.Locale}.Sanitize(), + pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), + pgx.Identifier{database.Spec.LcCollate}.Sanitize(), + pgx.Identifier{database.Spec.LcCtype}.Sanitize(), + pgx.Identifier{database.Spec.IcuLocale}.Sanitize(), pgx.Identifier{database.Spec.IcuRules}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) + + err = createDatabase(ctx, db, database) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should create a new Database with builtin locale", func(ctx SpecContext) { + database.Spec.LocaleProvider = "builtin" + database.Spec.BuiltinLocale = "C" + database.Spec.CollationVersion = "1.2.3" + + expectedValue := sqlmock.NewResult(0, 
1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s "+ + "LOCALE_PROVIDER %s BUILTIN_LOCALE %s COLLATION_VERSION %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), + pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), + pgx.Identifier{database.Spec.BuiltinLocale}.Sanitize(), + pgx.Identifier{database.Spec.CollationVersion}.Sanitize(), ) dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) @@ -176,3 +228,603 @@ var _ = Describe("Managed Database SQL", func() { }) }) }) + +var _ = Describe("Managed Extensions SQL", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + ext apiv1.ExtensionSpec + err error + + testError error + ) + + BeforeEach(func() { + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + ext = apiv1.ExtensionSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "testext", + Ensure: "present", + }, + Version: "1.0", + Schema: "default", + } + + testError = fmt.Errorf("test error") + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + Context("getDatabaseExtensionInfo", func() { + It("returns info when the extension exists", func(ctx SpecContext) { + dbMock. + ExpectQuery(detectDatabaseExtensionSQL). + WithArgs(ext.Name). + WillReturnRows( + sqlmock.NewRows([]string{"extname", "extversion", "nspname"}). + AddRow("testext", "1.0", "default"), + ) + extInfo, err := getDatabaseExtensionInfo(ctx, db, ext) + Expect(err).ToNot(HaveOccurred()) + Expect(extInfo).ToNot(BeNil()) + Expect(extInfo.Name).To(Equal("testext")) + Expect(extInfo.Schema).To(Equal("default")) + Expect(extInfo.Version).To(Equal("1.0")) + }) + + It("returns nil info when the extension does not exist", func(ctx SpecContext) { + dbMock. + ExpectQuery(detectDatabaseExtensionSQL). + WithArgs(ext.Name). + WillReturnRows( + sqlmock.NewRows([]string{"extname", "extversion", "nspname"}), + ) + extInfo, err := getDatabaseExtensionInfo(ctx, db, ext) + Expect(err).ToNot(HaveOccurred()) + Expect(extInfo).To(BeNil()) + }) + }) + + Context("createDatabaseExtension", func() { + createExtensionSQL := "CREATE EXTENSION \"testext\" VERSION \"1.0\" SCHEMA \"default\"" + + It("returns success when the extension has been created", func(ctx SpecContext) { + dbMock. + ExpectExec(createExtensionSQL). + WillReturnResult(sqlmock.NewResult(0, 1)) + Expect(createDatabaseExtension(ctx, db, ext)).Error().NotTo(HaveOccurred()) + }) + + It("fails when the extension could not be created", func(ctx SpecContext) { + dbMock. + ExpectExec(createExtensionSQL). + WillReturnError(testError) + Expect(createDatabaseExtension(ctx, db, ext)).Error().To(Equal(testError)) + }) + }) + + Context("dropDatabaseExtension", func() { + dropExtensionSQL := "DROP EXTENSION IF EXISTS \"testext\"" + + It("returns success when the extension has been dropped", func(ctx SpecContext) { + dbMock. + ExpectExec(dropExtensionSQL). + WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(dropDatabaseExtension(ctx, db, ext)).Error().NotTo(HaveOccurred()) + }) + + It("returns an error when the DROP statement failed", func(ctx SpecContext) { + dbMock. + ExpectExec(dropExtensionSQL). 
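+				// Note: these suites build the mock with
+				// sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), so
+				// strings given to ExpectExec/ExpectQuery must match the
+				// executed SQL literally (modulo whitespace normalization)
+				// rather than as regular expressions, sqlmock's default.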
+				WillReturnError(testError)
+
+			Expect(dropDatabaseExtension(ctx, db, ext)).Error().To(Equal(testError))
+		})
+	})
+
+	Context("updateDatabaseExtension", func() {
+		It("does nothing when the extension is already at the correct version", func(ctx SpecContext) {
+			Expect(updateDatabaseExtension(ctx, db, ext, &extInfo{
+				Name:    ext.Name,
+				Version: ext.Version,
+				Schema:  ext.Schema,
+			})).Error().NotTo(HaveOccurred())
+		})
+
+		It("updates the extension version", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" UPDATE TO \"1.0\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(updateDatabaseExtension(ctx, db, ext,
+				&extInfo{Name: ext.Name, Version: "0.9", Schema: ext.Schema})).Error().NotTo(HaveOccurred())
+		})
+
+		It("updates the schema", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" SET SCHEMA \"default\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(updateDatabaseExtension(ctx, db, ext,
+				&extInfo{Name: ext.Name, Version: ext.Version, Schema: "old"})).Error().NotTo(HaveOccurred())
+		})
+
+		It("sets the schema and the extension version", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" SET SCHEMA \"default\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" UPDATE TO \"1.0\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(updateDatabaseExtension(ctx, db, ext, &extInfo{
+				Name: ext.Name, Version: "0.9",
+				Schema: "old",
+			})).Error().NotTo(HaveOccurred())
+		})
+
+		It("fails when setting the schema fails", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" SET SCHEMA \"default\"").
+				WillReturnError(testError)
+
+			Expect(updateDatabaseExtension(ctx, db, ext,
+				&extInfo{Name: ext.Name, Version: ext.Version, Schema: "old"})).Error().To(MatchError(testError))
+		})
+
+		It("fails when setting the version fails", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" SET SCHEMA \"default\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" UPDATE TO \"1.0\"").
+				WillReturnError(testError)
+
+			Expect(updateDatabaseExtension(ctx, db, ext, &extInfo{
+				Name: ext.Name, Version: "0.9",
+				Schema: "old",
+			})).Error().To(MatchError(testError))
+		})
+	})
+})
+
+var _ = Describe("Managed schema SQL", func() {
+	var (
+		dbMock sqlmock.Sqlmock
+		db     *sql.DB
+		schema apiv1.SchemaSpec
+		err    error
+
+		testError error
+	)
+
+	BeforeEach(func() {
+		db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+		Expect(err).ToNot(HaveOccurred())
+
+		schema = apiv1.SchemaSpec{
+			DatabaseObjectSpec: apiv1.DatabaseObjectSpec{
+				Name:   "testschema",
+				Ensure: "present",
+			},
+			Owner: "owner",
+		}
+
+		testError = fmt.Errorf("test error")
+	})
+
+	AfterEach(func() {
+		Expect(dbMock.ExpectationsWereMet()).To(Succeed())
+	})
+
+	Context("getDatabaseSchemaInfo", func() {
+		It("returns info when the schema exists", func(ctx SpecContext) {
+			dbMock.
+				ExpectQuery(detectDatabaseSchemaSQL).
+				WithArgs(schema.Name).
+				WillReturnRows(
+					sqlmock.NewRows([]string{"name", "owner"}).
+						AddRow("name", "owner"),
+				)
+			schemaInfo, err := getDatabaseSchemaInfo(ctx, db, schema)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(schemaInfo).ToNot(BeNil())
+			Expect(schemaInfo.Name).To(Equal("name"))
+			Expect(schemaInfo.Owner).To(Equal("owner"))
+		})
+
+		It("returns nil info when the schema does not exist", func(ctx SpecContext) {
+			dbMock.
+				ExpectQuery(detectDatabaseSchemaSQL).
+				WithArgs(schema.Name).
+				WillReturnRows(
+					sqlmock.NewRows([]string{"name", "owner"}),
+				)
+			schemaInfo, err := getDatabaseSchemaInfo(ctx, db, schema)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(schemaInfo).To(BeNil())
+		})
+	})
+
+	Context("createDatabaseSchema", func() {
+		createSchemaSQL := "CREATE SCHEMA \"testschema\" AUTHORIZATION \"owner\""
+
+		It("returns success when the schema has been created", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(createSchemaSQL).
+				WillReturnResult(sqlmock.NewResult(0, 1))
+			Expect(createDatabaseSchema(ctx, db, schema)).Error().NotTo(HaveOccurred())
+		})
+
+		It("fails when the schema could not be created", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(createSchemaSQL).
+				WillReturnError(testError)
+			Expect(createDatabaseSchema(ctx, db, schema)).Error().To(Equal(testError))
+		})
+	})
+
+	Context("updateDatabaseSchema", func() {
+		It("does nothing when the schema has been correctly reconciled", func(ctx SpecContext) {
+			Expect(updateDatabaseSchema(ctx, db, schema, &schemaInfo{
+				Name:  schema.Name,
+				Owner: schema.Owner,
+			})).Error().NotTo(HaveOccurred())
+		})
+
+		It("updates the schema owner", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER SCHEMA \"testschema\" OWNER TO \"owner\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(updateDatabaseSchema(ctx, db, schema,
+				&schemaInfo{Name: schema.Name, Owner: "old"})).Error().NotTo(HaveOccurred())
+		})
+
+		It("fails when setting the owner fails", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER SCHEMA \"testschema\" OWNER TO \"owner\"").
+				WillReturnError(testError)
+
+			Expect(updateDatabaseSchema(ctx, db, schema,
+				&schemaInfo{Name: schema.Name, Owner: "old"})).Error().To(MatchError(testError))
+		})
+	})
+
+	Context("dropDatabaseSchema", func() {
+		dropSchemaSQL := "DROP SCHEMA IF EXISTS \"testschema\""
+
+		It("returns success when the schema has been dropped", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(dropSchemaSQL).
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(dropDatabaseSchema(ctx, db, schema)).Error().NotTo(HaveOccurred())
+		})
+
+		It("returns an error when the DROP statement failed", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(dropSchemaSQL).
+				WillReturnError(testError)
+
+			Expect(dropDatabaseSchema(ctx, db, schema)).Error().To(Equal(testError))
+		})
+	})
+})
+
+var _ = Describe("Managed Foreign Data Wrapper SQL", func() {
+	var (
+		dbMock sqlmock.Sqlmock
+		db     *sql.DB
+		fdw    apiv1.FDWSpec
+		err    error
+
+		testError error
+	)
+
+	BeforeEach(func() {
+		db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+		Expect(err).ToNot(HaveOccurred())
+
+		fdw = apiv1.FDWSpec{
+			DatabaseObjectSpec: apiv1.DatabaseObjectSpec{
+				Name:   "testfdw",
+				Ensure: "present",
+			},
+			Handler:   "testhandler",
+			Validator: "testvalidator",
+			Options: []apiv1.OptionSpec{
+				{
+					Name: "testoption",
+					OptionSpecValue: apiv1.OptionSpecValue{
+						Value: "testvalue",
+					},
+				},
+			},
+			Owner: "owner",
+		}
+
+		testError = fmt.Errorf("test error")
+	})
+
+	AfterEach(func() {
+		Expect(dbMock.ExpectationsWereMet()).To(Succeed())
+	})
+
+	Context("getDatabaseFDWInfo", func() {
+		It("returns info when the fdw exists", func(ctx SpecContext) {
+			dbMock.
+				ExpectQuery(detectDatabaseFDWSQL).
+				WithArgs(fdw.Name).
+				WillReturnRows(
+					sqlmock.NewRows([]string{"fdwname", "fdwhandler", "fdwvalidator", "options", "fdwowner"}).
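+					// fdwoptions is a text[] column: getDatabaseFDWInfo scans
+					// it through pq.StringArray, and the nil cell below models
+					// an FDW created without any OPTIONS clause.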
+ AddRow("testfdw", "testhandler", "testvalidator", nil, "testowner"), + ) + fdwInfo, err := getDatabaseFDWInfo(ctx, db, fdw) + Expect(err).ToNot(HaveOccurred()) + Expect(fdwInfo).ToNot(BeNil()) + Expect(fdwInfo.Name).To(Equal("testfdw")) + Expect(fdwInfo.Handler).To(Equal("testhandler")) + Expect(fdwInfo.Validator).To(Equal("testvalidator")) + Expect(fdwInfo.Owner).To(Equal("testowner")) + }) + + It("returns nil info when the fdw does not exist", func(ctx SpecContext) { + dbMock. + ExpectQuery(detectDatabaseFDWSQL). + WithArgs(fdw.Name). + WillReturnRows( + sqlmock.NewRows([]string{"fdwname", "fdwhandler", "fdwvalidator", "options", "fdwowner"}), + ) + fdwInfo, err := getDatabaseFDWInfo(ctx, db, fdw) + Expect(err).ToNot(HaveOccurred()) + Expect(fdwInfo).To(BeNil()) + }) + }) + + Context("createDatabaseFDW", func() { + createFDWSQL := "CREATE FOREIGN DATA WRAPPER \"testfdw\" HANDLER \"testhandler\" " + + "VALIDATOR \"testvalidator\" OPTIONS (\"testoption\" 'testvalue')" + + It("returns success when the fdw has been created", func(ctx SpecContext) { + dbMock. + ExpectExec(createFDWSQL). + WillReturnResult(sqlmock.NewResult(0, 1)) + Expect(createDatabaseFDW(ctx, db, fdw)).Error().NotTo(HaveOccurred()) + }) + + It("fails when the fdw could not be created", func(ctx SpecContext) { + dbMock. + ExpectExec(createFDWSQL). + WillReturnError(testError) + Expect(createDatabaseFDW(ctx, db, fdw)).Error().To(Equal(testError)) + }) + + It("success with NO HANDLER and NO VALIDATOR", func(ctx SpecContext) { + dbMock. + ExpectExec("CREATE FOREIGN DATA WRAPPER \"testfdw\" NO HANDLER NO VALIDATOR"). + WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(createDatabaseFDW(ctx, db, apiv1.FDWSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "testfdw", + Ensure: "present", + }, + Handler: "-", + Validator: "-", + Owner: "owner", + })).Error().NotTo(HaveOccurred()) + }) + }) + + Context("updateDatabaseFDW", func() { + It("does nothing when the fdw has been correctly reconciled", func(ctx SpecContext) { + Expect(updateDatabaseFDW(ctx, db, fdw, &fdwInfo{ + Name: fdw.Name, + Handler: fdw.Handler, + Validator: fdw.Validator, + Owner: fdw.Owner, + })).Error().NotTo(HaveOccurred()) + }) + + It("updates the fdw handler", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" HANDLER \"testhandler\""). + WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: "oldhandler", Validator: fdw.Validator, Owner: fdw.Owner})). + Error().NotTo(HaveOccurred()) + }) + + It("handles removal of handler when not specified", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" NO HANDLER"). + WillReturnResult(sqlmock.NewResult(0, 1)) + + fdw.Handler = "-" + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: "oldhandler", Validator: fdw.Validator, Owner: fdw.Owner})). + Error().NotTo(HaveOccurred()) + }) + + It("fail when setting the handler failed", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" HANDLER \"testhandler\""). + WillReturnError(testError) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: "oldhandler", Validator: fdw.Validator, Owner: fdw.Owner})). + Error().To(MatchError(testError)) + }) + + It("updates the fdw validator", func(ctx SpecContext) { + dbMock.ExpectExec( + "ALTER FOREIGN DATA WRAPPER \"testfdw\" VALIDATOR \"testvalidator\""). 
+ WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: "oldvalidator", Owner: fdw.Owner})). + Error().NotTo(HaveOccurred()) + }) + + It("handles removal of validator when not specified", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" NO VALIDATOR"). + WillReturnResult(sqlmock.NewResult(0, 1)) + + fdw.Validator = "-" + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: "oldvalidator", Owner: fdw.Owner})). + Error().NotTo(HaveOccurred()) + }) + + It("fail when setting the validator failed", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" VALIDATOR \"testvalidator\""). + WillReturnError(testError) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: "oldvalidator", Owner: fdw.Owner})). + Error().To(MatchError(testError)) + }) + + It("add new fdw options", func(ctx SpecContext) { + fdw.Options = []apiv1.OptionSpec{ + { + Name: "add_option", + OptionSpecValue: apiv1.OptionSpecValue{ + Value: "value", + Ensure: apiv1.EnsurePresent, + }, + }, + } + info := &fdwInfo{ + Name: fdw.Name, + Handler: fdw.Handler, + Validator: fdw.Validator, + Options: map[string]apiv1.OptionSpecValue{ + "modify_option": {Value: "old_value"}, + "remove_option": {Value: "value"}, + }, + Owner: fdw.Owner, + } + + expectedSQL := "ALTER FOREIGN DATA WRAPPER \"testfdw\" OPTIONS (ADD \"add_option\" 'value')" + dbMock.ExpectExec(expectedSQL).WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, info)).Error().NotTo(HaveOccurred()) + }) + + It("modify the fdw options", func(ctx SpecContext) { + fdw.Options = []apiv1.OptionSpec{ + { + Name: "modify_option", + OptionSpecValue: apiv1.OptionSpecValue{ + Value: "new_value", + Ensure: apiv1.EnsurePresent, + }, + }, + } + info := &fdwInfo{ + Name: fdw.Name, + Handler: fdw.Handler, + Validator: fdw.Validator, + Options: map[string]apiv1.OptionSpecValue{ + "modify_option": {Value: "old_value"}, + "remove_option": {Value: "value"}, + }, + Owner: fdw.Owner, + } + + expectedSQL := "ALTER FOREIGN DATA WRAPPER \"testfdw\" OPTIONS (SET \"modify_option\" 'new_value')" + dbMock.ExpectExec(expectedSQL).WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, info)).Error().NotTo(HaveOccurred()) + }) + + It("remove new fdw options", func(ctx SpecContext) { + fdw.Options = []apiv1.OptionSpec{ + { + Name: "remove_option", + OptionSpecValue: apiv1.OptionSpecValue{ + Value: "value", + Ensure: apiv1.EnsureAbsent, + }, + }, + } + info := &fdwInfo{ + Name: fdw.Name, + Handler: fdw.Handler, + Validator: fdw.Validator, + Options: map[string]apiv1.OptionSpecValue{ + "modify_option": {Value: "old_value"}, + "remove_option": {Value: "value"}, + }, + Owner: fdw.Owner, + } + + expectedSQL := "ALTER FOREIGN DATA WRAPPER \"testfdw\" OPTIONS (DROP \"remove_option\")" + dbMock.ExpectExec(expectedSQL).WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, info)).Error().NotTo(HaveOccurred()) + }) + + It("updates the fdw owner", func(ctx SpecContext) { + dbMock.ExpectExec( + "ALTER FOREIGN DATA WRAPPER \"testfdw\" OWNER TO \"owner\""). + WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: fdw.Validator, Owner: "oldowner"})). 
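+			// Gomega's Expect(...).Error() adapter asserts on the returned
+			// error value directly, sparing the intermediate err variable;
+			// the same idiom is used throughout these suites.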
+ Error().NotTo(HaveOccurred()) + }) + + It("fail when setting the owner failed", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" OWNER TO \"owner\""). + WillReturnError(testError) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: fdw.Validator, Owner: "old"})). + Error().To(MatchError(testError)) + }) + + It("updates the usages permissions of the fdw", func(ctx SpecContext) { + dbMock.ExpectExec( + "GRANT USAGE ON FOREIGN DATA WRAPPER \"testfdw\" TO \"owner\""). + WillReturnResult(sqlmock.NewResult(0, 1)) + fdw.Usages = []apiv1.UsageSpec{ + { + Name: "owner", + Type: "grant", + }, + } + Expect(updateDatabaseFDWUsage(ctx, db, &fdw)).Error().NotTo(HaveOccurred()) + }) + }) + + Context("dropDatabaseFDW", func() { + dropFDWSQL := "DROP FOREIGN DATA WRAPPER IF EXISTS \"testfdw\"" + + It("returns success when the foreign data wrapper has been dropped", func(ctx SpecContext) { + dbMock. + ExpectExec(dropFDWSQL). + WillReturnResult(sqlmock.NewResult(0, 1)) + Expect(dropDatabaseFDW(ctx, db, fdw)).Error().NotTo(HaveOccurred()) + }) + + It("returns an error when the DROP statement failed", func(ctx SpecContext) { + dbMock. + ExpectExec(dropFDWSQL). + WillReturnError(testError) + + Expect(dropDatabaseFDW(ctx, db, fdw)).Error().To(Equal(testError)) + }) + }) +}) diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index 99a1231fe9..569aa87950 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,19 +13,24 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller import ( + "context" "database/sql" "fmt" "github.com/DATA-DOG/go-sqlmock" "github.com/jackc/pgx/v5" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -32,19 +38,15 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) -type fakeInstanceData struct { - *postgres.Instance - db *sql.DB -} - -func (f *fakeInstanceData) GetSuperUserDB() (*sql.DB, error) { - return f.db, nil -} +const databaseDetectionQuery = `SELECT count(*) + FROM pg_catalog.pg_database + WHERE datname = $1` var _ = Describe("Managed Database status", func() { var ( @@ -78,24 +80,19 @@ var _ = Describe("Managed Database status", func() { ClusterRef: corev1.LocalObjectReference{ Name: cluster.Name, }, - Name: "db-one", - Owner: "app", + ReclaimPolicy: apiv1.DatabaseReclaimDelete, + Name: "db-one", + Owner: "app", }, } db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - pgInstance := &postgres.Instance{ - Namespace: "default", - PodName: "cluster-example-1", - ClusterName: "cluster-example", - } - - f := fakeInstanceData{ - Instance: pgInstance, - db: db, - } + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-example-1"). + WithClusterName("cluster-example") fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). WithObjects(cluster, database). @@ -105,59 +102,290 @@ var _ = Describe("Managed Database status", func() { r = &DatabaseReconciler{ Client: fakeClient, Scheme: schemeBuilder.BuildWithAllKnownScheme(), - instance: &f, + instance: pgInstance, + getSuperUserDB: func() (*sql.DB, error) { + return db, nil + }, } + r.finalizerReconciler = newFinalizerReconciler( + fakeClient, + utils.DatabaseFinalizerName, + r.evaluateDropDatabase, + ) }) AfterEach(func() { Expect(dbMock.ExpectationsWereMet()).To(Succeed()) }) + It("adds finalizer and sets status ready on success", func(ctx SpecContext) { + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name). + WillReturnRows(expectedValue) + + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + pgx.Identifier{database.Spec.Owner}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + err := reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).ToNot(HaveOccurred()) + + Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + Expect(database.GetStatusMessage()).Should(BeEmpty()) + Expect(database.GetFinalizers()).NotTo(BeEmpty()) + }) + It("database object inherits error after patching", func(ctx SpecContext) { - // Mocking DetectDB + expectedError := fmt.Errorf("no permission") expectedValue := sqlmock.NewRows([]string{""}).AddRow("1") - dbMock.ExpectQuery(`SELECT count(*) - FROM pg_database - WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) + dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name). 
+ WillReturnRows(expectedValue) - // Mocking Alter Database - expectedError := fmt.Errorf("no permission") expectedQuery := fmt.Sprintf("ALTER DATABASE %s OWNER TO %s", pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), ) dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError) - _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ - Namespace: database.Namespace, - Name: database.Spec.Name, + err := reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).ToNot(HaveOccurred()) + + Expect(database.Status.Applied).Should(HaveValue(BeFalse())) + Expect(database.GetStatusMessage()).Should(ContainSubstring(expectedError.Error())) + }) + + When("reclaim policy is delete", func() { + It("on deletion it removes finalizers and drops DB", func(ctx SpecContext) { + // Mocking DetectDB + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name). + WillReturnRows(expectedValue) + + // Mocking CreateDB + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + pgx.Identifier{database.Spec.Owner}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + // Mocking Drop Database + expectedDrop := fmt.Sprintf("DROP DATABASE IF EXISTS %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1)) + + err := reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(database.GetFinalizers()).NotTo(BeEmpty()) + Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + Expect(database.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. + // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + database.SetGeneration(database.GetGeneration() + 1) + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + + // We now look at the behavior when we delete the Database object + Expect(fakeClient.Delete(ctx, database)).To(Succeed()) + + err = reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + When("reclaim policy is retain", func() { + It("on deletion it removes finalizers and does NOT drop the DB", func(ctx SpecContext) { + database.Spec.ReclaimPolicy = apiv1.DatabaseReclaimRetain + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + + // Mocking DetectDB + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name). 
+ WillReturnRows(expectedValue) + + // Mocking CreateDB + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + pgx.Identifier{database.Spec.Owner}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + err := reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(database.GetFinalizers()).NotTo(BeEmpty()) + Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + Expect(database.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. + // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + database.SetGeneration(database.GetGeneration() + 1) + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + + // We now look at the behavior when we delete the Database object + Expect(fakeClient.Delete(ctx, database)).To(Succeed()) + + err = reconcileDatabase(ctx, fakeClient, r, database) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) { + // Since the fakeClient has the `cluster-example` cluster, let's reference + // another cluster `cluster-other` that is not found by the fakeClient + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-other-1"). + WithClusterName("cluster-other") + + r = &DatabaseReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: pgInstance, + getSuperUserDB: func() (*sql.DB, error) { + return db, nil + }, + } + + // Updating the Database object to reference the newly created Cluster + database.Spec.ClusterRef.Name = "cluster-other" + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + + err := reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).ToNot(HaveOccurred()) + + Expect(database.Status.Applied).Should(HaveValue(BeFalse())) + Expect(database.Status.Message).Should(ContainSubstring( + fmt.Sprintf("%q not found", database.Spec.ClusterRef.Name))) + }) + + It("skips reconciliation if database object isn't found (deleted database)", func(ctx SpecContext) { + // Initialize a new Database but without creating it in the K8S Cluster + otherDatabase := &apiv1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: "db-other", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.DatabaseSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "db-one", + Owner: "app", + }, + } + + // Reconcile the database that hasn't been created in the K8S Cluster + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: otherDatabase.Namespace, + Name: otherDatabase.Name, }}) + + // Expect the reconciler to exit silently, since the object doesn't exist Expect(err).ToNot(HaveOccurred()) + Expect(result).Should(BeZero()) // nothing to do, since the DB is being deleted + }) + + It("drops database with ensure absent option", func(ctx SpecContext) { + // Update the obj to set EnsureAbsent + database.Spec.Ensure = apiv1.EnsureAbsent + Expect(fakeClient.Update(ctx, database)).To(Succeed()) - var updatedDatabase apiv1.Database - err = fakeClient.Get(ctx, client.ObjectKey{ - 
Namespace: database.Namespace, - Name: database.Name, - }, &updatedDatabase) + expectedValue := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "DROP DATABASE IF EXISTS %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) + + err := reconcileDatabase(ctx, fakeClient, r, database) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Ready).Should(BeFalse()) - Expect(updatedDatabase.Status.Error).Should(ContainSubstring(expectedError.Error())) + Expect(database.Status.Applied).To(HaveValue(BeTrue())) + Expect(database.Status.Message).To(BeEmpty()) + Expect(database.Status.ObservedGeneration).To(BeEquivalentTo(1)) }) - It("properly marks the status on a succeeded reconciliation", func(ctx SpecContext) { - _, err := r.succeededReconciliation(ctx, database) + It("marks as failed if the target Database is already being managed", func(ctx SpecContext) { + // Let's force the database to have a past reconciliation + database.Status.ObservedGeneration = 2 + Expect(fakeClient.Status().Update(ctx, database)).To(Succeed()) + + // A new Database Object targeting the same "db-one" + dbDuplicate := &apiv1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: "db-duplicate", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.DatabaseSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "db-one", + Owner: "app", + }, + } + + // Expect(fakeClient.Create(ctx, currentManager)).To(Succeed()) + Expect(fakeClient.Create(ctx, dbDuplicate)).To(Succeed()) + + err := reconcileDatabase(ctx, fakeClient, r, dbDuplicate) Expect(err).ToNot(HaveOccurred()) - Expect(database.Status.Ready).To(BeTrue()) - Expect(database.Status.Error).To(BeEmpty()) + + expectedError := fmt.Sprintf("%q is already managed by object %q", + dbDuplicate.Spec.Name, database.Name) + Expect(dbDuplicate.Status.Applied).To(HaveValue(BeFalse())) + Expect(dbDuplicate.Status.Message).To(ContainSubstring(expectedError)) + Expect(dbDuplicate.Status.ObservedGeneration).To(BeZero()) }) - It("properly marks the status on a failed reconciliation", func(ctx SpecContext) { - exampleError := fmt.Errorf("sample error for database %s", database.Spec.Name) + It("properly signals a database is on a replica cluster", func(ctx SpecContext) { + initialCluster := cluster.DeepCopy() + cluster.Spec.ReplicaCluster = &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + } + Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed()) - _, err := r.failedReconciliation(ctx, database, exampleError) + err := reconcileDatabase(ctx, fakeClient, r, database) Expect(err).ToNot(HaveOccurred()) - Expect(database.Status.Ready).To(BeFalse()) - Expect(database.Status.Error).To(BeEquivalentTo(exampleError.Error())) + + Expect(database.Status.Applied).Should(BeNil()) + Expect(database.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary")) }) }) + +func reconcileDatabase( + ctx context.Context, + fakeClient client.Client, + r *DatabaseReconciler, + database *apiv1.Database, +) error { + GinkgoT().Helper() + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: database.GetNamespace(), + Name: database.GetName(), + }}) + Expect(err).ToNot(HaveOccurred()) + return fakeClient.Get(ctx, client.ObjectKey{ + Namespace: database.GetNamespace(), + Name: database.GetName(), + }, database) +} diff --git a/internal/management/controller/database_objects.go 
b/internal/management/controller/database_objects.go new file mode 100644 index 0000000000..a25dd73f28 --- /dev/null +++ b/internal/management/controller/database_objects.go @@ -0,0 +1,148 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +type databaseObjectSpec interface { + GetName() string + GetEnsure() apiv1.EnsureOption +} + +type databaseObjectManager[Spec databaseObjectSpec, Info any] struct { + get func(ctx context.Context, db *sql.DB, spec Spec) (*Info, error) + create func(ctx context.Context, db *sql.DB, spec Spec) error + update func(ctx context.Context, db *sql.DB, spec Spec, info *Info) error + drop func(ctx context.Context, db *sql.DB, spec Spec) error +} + +func createFailedStatus(name, message string) apiv1.DatabaseObjectStatus { + return apiv1.DatabaseObjectStatus{ + Name: name, + Applied: false, + Message: message, + } +} + +func createSuccessStatus(name string) apiv1.DatabaseObjectStatus { + return apiv1.DatabaseObjectStatus{ + Name: name, + Applied: true, + } +} + +func (r *databaseObjectManager[Spec, Info]) reconcileList( + ctx context.Context, + db *sql.DB, + specs []Spec, +) []apiv1.DatabaseObjectStatus { + result := make([]apiv1.DatabaseObjectStatus, len(specs)) + for i := range specs { + spec := specs[i] + result[i] = r.reconcile(ctx, db, spec) + } + return result +} + +func (r *databaseObjectManager[Spec, Info]) reconcile( + ctx context.Context, + db *sql.DB, + spec Spec, +) apiv1.DatabaseObjectStatus { + info, err := r.get(ctx, db, spec) + if err != nil { + return createFailedStatus( + spec.GetName(), + fmt.Sprintf("while reading the object %#v: %v", spec, err), + ) + } + + exists := info != nil + ensureOption := spec.GetEnsure() + + switch { + case !exists && ensureOption == apiv1.EnsurePresent: + return r.reconcileCreate(ctx, db, spec) + + case !exists && ensureOption == apiv1.EnsureAbsent: + return createSuccessStatus(spec.GetName()) + + case exists && ensureOption == apiv1.EnsurePresent: + return r.reconcileUpdate(ctx, db, spec, info) + + case exists && ensureOption == apiv1.EnsureAbsent: + return r.reconcileDrop(ctx, db, spec) + + default: + // If this happens, the CRD and/or the validating webhook + // are not working properly. In this case, let's do nothing: + // better to be safe than sorry. 
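The reconcile method above reduces to a dispatch on the (exists, ensure) pair, and the defensive default that closes the switch follows right after this note. A self-contained sketch of the same dispatch, using stand-in types instead of the package's apiv1 ones (all names here are illustrative, not from the patch):

```go
package main

import "fmt"

// Stand-ins for apiv1.EnsureOption and the reconciler's decision; the real
// types live in the CloudNativePG API package.
type ensureOption string

const (
	ensurePresent ensureOption = "present"
	ensureAbsent  ensureOption = "absent"
)

type action string

const (
	actionCreate action = "create"
	actionUpdate action = "update"
	actionDrop   action = "drop"
	actionNoop   action = "noop"
)

// decide mirrors the dispatch in databaseObjectManager.reconcile: four
// reachable (exists, ensure) combinations, plus a defensive no-op default
// for an ensure value the webhook should have rejected.
func decide(exists bool, ensure ensureOption) action {
	switch {
	case !exists && ensure == ensurePresent:
		return actionCreate
	case !exists && ensure == ensureAbsent:
		return actionNoop // nothing to drop
	case exists && ensure == ensurePresent:
		return actionUpdate
	case exists && ensure == ensureAbsent:
		return actionDrop
	default:
		return actionNoop // invalid spec: better safe than sorry
	}
}

func main() {
	for _, exists := range []bool{false, true} {
		for _, ensure := range []ensureOption{ensurePresent, ensureAbsent} {
			fmt.Printf("exists=%v ensure=%s -> %s\n", exists, ensure, decide(exists, ensure))
		}
	}
}
```

Running it prints one decision per combination, which makes the truth table behind the switch easy to eyeball.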
+ return createSuccessStatus(spec.GetName()) + } +} + +func (r *databaseObjectManager[Spec, Info]) reconcileCreate( + ctx context.Context, + db *sql.DB, + spec Spec, +) apiv1.DatabaseObjectStatus { + if err := r.create(ctx, db, spec); err != nil { + return createFailedStatus( + spec.GetName(), + err.Error(), + ) + } + + return createSuccessStatus(spec.GetName()) +} + +func (r *databaseObjectManager[Spec, Info]) reconcileUpdate( + ctx context.Context, db *sql.DB, spec Spec, info *Info, +) apiv1.DatabaseObjectStatus { + if err := r.update(ctx, db, spec, info); err != nil { + return createFailedStatus( + spec.GetName(), + err.Error(), + ) + } + + return createSuccessStatus(spec.GetName()) +} + +func (r *databaseObjectManager[Spec, Info]) reconcileDrop( + ctx context.Context, + db *sql.DB, + spec Spec, +) apiv1.DatabaseObjectStatus { + if err := r.drop(ctx, db, spec); err != nil { + return createFailedStatus( + spec.GetName(), + err.Error(), + ) + } + + return createSuccessStatus(spec.GetName()) +} diff --git a/internal/management/controller/externalservers/doc.go b/internal/management/controller/externalservers/doc.go index 47362eb33d..fdf07d5b77 100644 --- a/internal/management/controller/externalservers/doc.go +++ b/internal/management/controller/externalservers/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package externalservers contains the reconciler of external servers, taking diff --git a/internal/management/controller/externalservers/manager.go b/internal/management/controller/externalservers/manager.go index 496ddcba21..3846f8b177 100644 --- a/internal/management/controller/externalservers/manager.go +++ b/internal/management/controller/externalservers/manager.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package externalservers @@ -46,6 +49,7 @@ func NewReconciler(instance *postgres.Instance, client client.Client) *Reconcile func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). + Named("instance-external-server"). 
Complete(r) } @@ -54,8 +58,8 @@ func (r *Reconciler) getCluster(ctx context.Context) (*apiv1.Cluster, error) { var cluster apiv1.Cluster err := r.client.Get(ctx, types.NamespacedName{ - Namespace: r.instance.Namespace, - Name: r.instance.ClusterName, + Namespace: r.instance.GetNamespaceName(), + Name: r.instance.GetClusterName(), }, &cluster) if err != nil { diff --git a/internal/management/controller/externalservers/reconciler.go b/internal/management/controller/externalservers/reconciler.go index 5d246a145a..3e5a298e2e 100644 --- a/internal/management/controller/externalservers/reconciler.go +++ b/internal/management/controller/externalservers/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package externalservers diff --git a/internal/management/controller/finalizers.go b/internal/management/controller/finalizers.go new file mode 100644 index 0000000000..ca355d95cd --- /dev/null +++ b/internal/management/controller/finalizers.go @@ -0,0 +1,68 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type finalizerReconciler[T client.Object] struct { + cli client.Client + finalizerName string + onRemoveFunc func(ctx context.Context, resource T) error +} + +func newFinalizerReconciler[T client.Object]( + cli client.Client, + finalizerName string, + onRemoveFunc func(ctx context.Context, resource T) error, +) *finalizerReconciler[T] { + return &finalizerReconciler[T]{ + cli: cli, + finalizerName: finalizerName, + onRemoveFunc: onRemoveFunc, + } +} + +func (f finalizerReconciler[T]) reconcile(ctx context.Context, resource T) error { + // add the finalizer to non-deleted resources if not already present + if resource.GetDeletionTimestamp().IsZero() { + if !controllerutil.AddFinalizer(resource, f.finalizerName) { + return nil + } + return f.cli.Update(ctx, resource) + } + + // the resource is being deleted but no finalizer is present, we can quit + if !controllerutil.ContainsFinalizer(resource, f.finalizerName) { + return nil + } + + if err := f.onRemoveFunc(ctx, resource); err != nil { + return err + } + + // remove our finalizer from the list and update it.
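The method ends by dropping the finalizer and persisting the object, as the closing lines below show. The ordering is the point: the cleanup hook must succeed before the finalizer is removed, otherwise deletion stays blocked and a later reconciliation retries. A self-contained sketch of the same three-phase logic, with a toy object standing in for client.Object and plain slices instead of controllerutil (all names illustrative):

```go
package main

import (
	"context"
	"fmt"
	"slices"
)

// toy object standing in for client.Object (illustrative only)
type object struct {
	finalizers []string
	deleting   bool
}

// reconcileFinalizer mirrors finalizerReconciler.reconcile:
//  1. live object, finalizer missing -> add it (persisted via Update upstream)
//  2. deleting, finalizer missing    -> nothing left to do
//  3. deleting, finalizer present    -> run cleanup, then drop the finalizer
func reconcileFinalizer(ctx context.Context, obj *object, name string,
	onRemove func(context.Context, *object) error,
) error {
	if !obj.deleting {
		if slices.Contains(obj.finalizers, name) {
			return nil
		}
		obj.finalizers = append(obj.finalizers, name)
		return nil
	}
	if !slices.Contains(obj.finalizers, name) {
		return nil
	}
	if err := onRemove(ctx, obj); err != nil {
		// keep the finalizer: deletion stays blocked until cleanup succeeds
		return err
	}
	obj.finalizers = slices.DeleteFunc(obj.finalizers,
		func(s string) bool { return s == name })
	return nil
}

func main() {
	ctx := context.Background()
	obj := &object{}
	name := "example.cnpg.io/finalizer" // illustrative finalizer name

	_ = reconcileFinalizer(ctx, obj, name, nil)
	fmt.Println("after first reconcile:", obj.finalizers)

	obj.deleting = true
	cleanup := func(context.Context, *object) error {
		fmt.Println("running the on-remove cleanup hook")
		return nil
	}
	_ = reconcileFinalizer(ctx, obj, name, cleanup)
	fmt.Println("after deletion reconcile:", obj.finalizers)
}
```

In the real helper the two mutations are persisted with cli.Update; here they simply mutate the toy object.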
+ controllerutil.RemoveFinalizer(resource, f.finalizerName) + return f.cli.Update(ctx, resource) +} diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index eaa211fb13..e77399fa70 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,13 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller import ( "context" - "crypto/tls" "database/sql" "errors" "fmt" @@ -28,24 +30,26 @@ import ( "strconv" "time" + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + cnpgiclient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/roles" - "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/infrastructure" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/reconciler" "github.com/cloudnative-pg/cloudnative-pg/internal/management/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" postgresManagement "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" @@ -58,7 +62,6 @@ import ( externalcluster "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch" clusterstatus "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" "github.com/cloudnative-pg/cloudnative-pg/pkg/system" - pkgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) const ( @@ -88,7 +91,12 @@ func (r *InstanceReconciler) Reconcile( _ reconcile.Request, ) (reconcile.Result, error) { // set up a convenient contextLog object so we don't have to type request over and over again - contextLogger, ctx := log.SetupLogger(ctx) + contextLogger := log.FromContext(ctx). 
+ WithValues( + "instance", r.instance.GetPodName(), + "cluster", r.instance.GetClusterName(), + "namespace", r.instance.GetNamespaceName(), + ) // if the context has already been cancelled, // trying to reconcile would just lead to misleading errors being reported @@ -110,8 +118,26 @@ func (r *InstanceReconciler) Reconcile( return reconcile.Result{}, fmt.Errorf("could not fetch Cluster: %w", err) } - // Print the Cluster - contextLogger.Debug("Reconciling Cluster", "cluster", cluster) + contextLogger.Debug("Reconciling Cluster") + + pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second) + defer cancelPluginLoading() + + pluginClient, err := cnpgiclient.WithPlugins( + pluginLoadingContext, + r.pluginRepository, + cluster.GetInstanceEnabledPluginNames()..., + ) + if err != nil { + contextLogger.Error(err, "Error loading plugins, retrying") + return ctrl.Result{}, err + } + defer func() { + pluginClient.Close(ctx) + }() + + ctx = cnpgiclient.SetPluginClientInContext(ctx, pluginClient) + ctx = cluster.SetInContext(ctx) // Reconcile PostgreSQL instance parameters r.reconcileInstance(cluster) @@ -125,7 +151,7 @@ func (r *InstanceReconciler) Reconcile( requeueOnMissingPermissions := r.updateCacheFromCluster(ctx, cluster) // Reconcile monitoring section - r.reconcileMetrics(cluster) + r.reconcileMetrics(ctx, cluster) r.reconcileMonitoringQueries(ctx, cluster) // Verify that the promotion token is usable before changing the archive mode and triggering restarts @@ -151,7 +177,10 @@ func (r *InstanceReconciler) Reconcile( // Reconcile secrets and cryptographic material // This doesn't need the PG connection, but it needs to reload it in case of changes - reloadNeeded := r.RefreshSecrets(ctx, cluster) + reloadNeeded, err := r.certificateReconciler.RefreshSecrets(ctx, cluster) + if err != nil { + return reconcile.Result{}, fmt.Errorf("while refreshing secrets: %w", err) + } reloadConfigNeeded, err := r.refreshConfigurationFiles(ctx, cluster) if err != nil { @@ -186,7 +215,7 @@ func (r *InstanceReconciler) Reconcile( return reconcile.Result{}, nil } - if r.instance.IsServerHealthy() != nil { + if err := r.instance.IsReady(); err != nil { contextLogger.Info("Instance is still down, will retry in 1 second") return reconcile.Result{RequeueAfter: time.Second}, nil } @@ -202,7 +231,7 @@ func (r *InstanceReconciler) Reconcile( "tokenContent", tokenError.TokenContent(), ) // We should be waiting for WAL recovery to reach the LSN in the token - return reconcile.Result{RequeueAfter: 10 * time.Second}, err + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } } @@ -223,6 +252,18 @@ func (r *InstanceReconciler) Reconcile( if reloadNeeded && !restarted { contextLogger.Info("reloading the instance") + + // IMPORTANT + // + // We are unsure of the state of the PostgreSQL configuration + // meanwhile a new configuration is applied. + // + // For this reason, before applying a new configuration we + // reset the FailoverQuorum object - de facto preventing any failover - + // and we update it after. + if err = r.resetFailoverQuorumObject(ctx, cluster); err != nil { + return reconcile.Result{}, err + } if err = r.instance.Reload(ctx); err != nil { return reconcile.Result{}, fmt.Errorf("while reloading the instance: %w", err) } @@ -231,22 +272,30 @@ func (r *InstanceReconciler) Reconcile( } } + if err = r.updateFailoverQuorumObject(ctx, cluster); err != nil { + return reconcile.Result{}, err + } + // IMPORTANT // From now on, the database can be assumed as running. 
Every operation // needing the database to be up should be put below this line. r.configureSlotReplicator(cluster) + postgresDB, err := r.instance.ConnectionPool().Connection("postgres") + if err != nil { + return reconcile.Result{}, fmt.Errorf("while getting the postgres connection: %w", err) + } if result, err := reconciler.ReconcileReplicationSlots( ctx, - r.instance.PodName, - infrastructure.NewPostgresManager(r.instance.ConnectionPool()), + r.instance.GetPodName(), + postgresDB, cluster, ); err != nil || !result.IsZero() { return result, err } - if r.instance.PodName == cluster.Status.CurrentPrimary { + if r.instance.GetPodName() == cluster.Status.CurrentPrimary { result, err := roles.Reconcile(ctx, r.instance, cluster, r.client) if err != nil || !result.IsZero() { return result, err @@ -288,7 +337,7 @@ func (r *InstanceReconciler) Reconcile( // operator. Without another reconciliation loop we would have an incoherent // state of electable synchronous_names inside the configuration. // (this is only relevant if syncReplicaElectionConstraint is enabled) - if requeueOnMissingPermissions || r.shouldRequeueForMissingTopology(cluster) { + if requeueOnMissingPermissions || r.shouldRequeueForMissingTopology(ctx, cluster) { return reconcile.Result{RequeueAfter: 30 * time.Second}, nil } @@ -296,7 +345,7 @@ func (r *InstanceReconciler) Reconcile( } func (r *InstanceReconciler) configureSlotReplicator(cluster *apiv1.Cluster) { - switch r.instance.PodName { + switch r.instance.GetPodName() { case cluster.Status.CurrentPrimary, cluster.Status.TargetPrimary: r.instance.ConfigureSlotReplicator(nil) default: @@ -308,27 +357,26 @@ func (r *InstanceReconciler) restartPrimaryInplaceIfRequested( ctx context.Context, cluster *apiv1.Cluster, ) (bool, error) { - isPrimary := cluster.Status.CurrentPrimary == r.instance.PodName + isPrimary := cluster.Status.CurrentPrimary == r.instance.GetPodName() restartRequested := isPrimary && cluster.Status.Phase == apiv1.PhaseInplacePrimaryRestart if restartRequested { if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { return false, fmt.Errorf("cannot restart the primary in-place when a switchover is in progress") } - restartTimeout := cluster.GetRestartTimeout() if err := r.instance.RequestAndWaitRestartSmartFast( ctx, - time.Duration(restartTimeout)*time.Second, + cluster.GetRestartTimeout(), ); err != nil { return true, err } - return true, clusterstatus.RegisterPhase( + return true, clusterstatus.PatchWithOptimisticLock( ctx, r.client, cluster, - apiv1.PhaseHealthy, - "Primary instance restarted in-place", + clusterstatus.SetPhase(apiv1.PhaseHealthy, "Primary instance restarted in-place"), + clusterstatus.SetClusterReadyCondition, ) } return false, nil @@ -343,7 +391,7 @@ func (r *InstanceReconciler) refreshConfigurationFiles( return false, err } - reloadIdent, err := r.instance.RefreshPGIdent(cluster.Spec.PostgresConfiguration.PgIdent) + reloadIdent, err := r.instance.RefreshPGIdent(ctx, cluster.Spec.PostgresConfiguration.PgIdent) if err != nil { return false, err } @@ -351,7 +399,12 @@ func (r *InstanceReconciler) refreshConfigurationFiles( // Reconcile PostgreSQL configuration // This doesn't need the PG connection, but it needs to reload it in case of changes - reloadConfig, err := r.instance.RefreshConfigurationFilesFromCluster(cluster, false) + reloadConfig, err := r.instance.RefreshConfigurationFilesFromCluster( + ctx, + cluster, + false, + postgresClient.OperationType_TYPE_RECONCILE, + ) if err != nil { return false, err } @@ -366,7 +419,9 @@ 
func (r *InstanceReconciler) refreshConfigurationFiles( } func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv1.Cluster) *reconcile.Result { - fencingRequired := cluster.IsInstanceFenced(r.instance.PodName) + contextLogger := log.FromContext(ctx) + + fencingRequired := cluster.IsInstanceFenced(r.instance.GetPodName()) isFenced := r.instance.IsFenced() switch { case !isFenced && fencingRequired: @@ -378,7 +433,7 @@ func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv timeout := time.Second * time.Duration(cluster.GetMaxStartDelay()) err := r.instance.RequestAndWaitFencingOff(ctx, timeout) if err != nil { - log.Error(err, "while waiting for the instance to be restarted after lifting the fence") + contextLogger.Error(err, "while waiting for the instance to be restarted after lifting the fence") } return &reconcile.Result{} } @@ -395,7 +450,7 @@ func handleErrNextLoop(err error) (reconcile.Result, error) { // initialize will handle initialization tasks func (r *InstanceReconciler) initialize(ctx context.Context, cluster *apiv1.Cluster) error { // we check there are no parameters that would prevent a follower to start - if err := r.verifyParametersForFollower(cluster); err != nil { + if err := r.verifyParametersForFollower(ctx, cluster); err != nil { return err } @@ -411,7 +466,7 @@ func (r *InstanceReconciler) initialize(ctx context.Context, cluster *apiv1.Clus return err } - r.instance.SetFencing(cluster.IsInstanceFenced(r.instance.PodName)) + r.instance.SetFencing(cluster.IsInstanceFenced(r.instance.GetPodName())) return nil } @@ -421,14 +476,20 @@ func (r *InstanceReconciler) initialize(ctx context.Context, cluster *apiv1.Clus // This could not be the case if the cluster spec value for one of those parameters // is decreased shortly after having been increased. The follower would be restarting // towards a high level, then write the lower value to the local config -func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster) error { +func (r *InstanceReconciler) verifyParametersForFollower( + ctx context.Context, + cluster *apiv1.Cluster, +) error { + contextLogger := log.FromContext(ctx) + if isPrimary, _ := r.instance.IsPrimary(); isPrimary { return nil } // we use a file as a flag to ensure the pod has been restarted already. I.e. 
on // newly created pod we don't need to check the enforced parameters - filename := path.Join(r.instance.PgData, fmt.Sprintf("%s-%s", constants.Startup, r.instance.PodName)) + filename := path.Join(r.instance.PgData, fmt.Sprintf("%s-%s", + constants.Startup, r.instance.GetPodName())) exists, err := fileutils.FileExists(filename) if err != nil { return err @@ -438,7 +499,7 @@ func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster) _, err := fileutils.WriteFileAtomic(filename, []byte(nil), 0o600) return err } - log.Info("Found previous run flag", "filename", filename) + contextLogger.Info("Found previous run flag", "filename", filename) controldataParams, err := postgresManagement.LoadEnforcedParametersFromPgControldata(r.instance.PgData) if err != nil { return err @@ -463,7 +524,7 @@ func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster) if len(options) == 0 { return nil } - log.Info("Updating some enforced parameters that would prevent the instance to start", + contextLogger.Info("Updating some enforced parameters that would prevent the instance to start", "parameters", options, "clusterParams", clusterParams) // we write the safer enforced parameter values to pod config as safety // in the face of cluster specs going up and down from nervous users @@ -482,7 +543,7 @@ func (r *InstanceReconciler) reconcileOldPrimary( ) (restarted bool, err error) { contextLogger := log.FromContext(ctx) - if cluster.Status.TargetPrimary == r.instance.PodName { + if cluster.Status.TargetPrimary == r.instance.GetPodName() { return false, nil } @@ -630,7 +691,7 @@ func (r *InstanceReconciler) reconcileExtensions( for _, extension := range postgres.ManagedExtensions { extensionIsUsed := extension.IsUsed(userSettings) - row := tx.QueryRow("SELECT COUNT(*) > 0 FROM pg_extension WHERE extname = $1", extension.Name) + row := tx.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_extension WHERE extname = $1", extension.Name) err = row.Err() if err != nil { break @@ -698,7 +759,7 @@ func (r *InstanceReconciler) reconcilePoolers( } var existsFunction bool - row = tx.QueryRow(fmt.Sprintf("SELECT COUNT(*) > 0 FROM pg_proc WHERE proname='%s' and prosrc='%s'", + row = tx.QueryRow(fmt.Sprintf("SELECT COUNT(*) > 0 FROM pg_catalog.pg_proc WHERE proname='%s' and prosrc='%s'", userSearchFunctionName, userSearchFunction)) err = row.Scan(&existsFunction) @@ -744,7 +805,7 @@ func (r *InstanceReconciler) reconcileClusterRoleWithoutDB( return false, err } // Reconcile replica role - if cluster.Status.TargetPrimary != r.instance.PodName { + if cluster.Status.TargetPrimary != r.instance.GetPodName() { if !isPrimary { // We need to ensure that this instance is replicating from the correct server return r.instance.RefreshReplicaConfiguration(ctx, cluster, r.client) @@ -762,12 +823,13 @@ func (r *InstanceReconciler) reconcileClusterRoleWithoutDB( // reconcileMetrics updates any required metrics func (r *InstanceReconciler) reconcileMetrics( + ctx context.Context, cluster *apiv1.Cluster, ) { exporter := r.metricsServerExporter // We should never reset the SwitchoverRequired metrics as it needs the primary instance restarts, // however, if the cluster is healthy we make sure it is set to 0. 
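The comment above encodes a rule that continues in the hunk below: only the current primary ever touches the SwitchoverRequired gauge, setting it to 1 while the cluster is in PhaseWaitingForUser and back to 0 once healthy. A rough sketch of that gauge handling with a stand-in prometheus metric (metric name and booleans are illustrative, not the exporter's own):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Stand-in for the exporter's SwitchoverRequired gauge (illustrative name).
	switchoverRequired := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "cnpg_demo_switchover_required",
	})

	isCurrentPrimary := true // r.instance.GetPodName() == cluster.Status.CurrentPrimary
	waitingForUser := false  // cluster.Status.Phase == apiv1.PhaseWaitingForUser

	// Replicas never reset the gauge (clearing the condition needs a primary
	// restart); only the current primary reports it, in either direction.
	if isCurrentPrimary {
		if waitingForUser {
			switchoverRequired.Set(1)
		} else {
			switchoverRequired.Set(0)
		}
	}
	fmt.Println("gauge updated")
}
```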
- if cluster.Status.CurrentPrimary == r.instance.PodName { + if cluster.Status.CurrentPrimary == r.instance.GetPodName() { if cluster.Status.Phase == apiv1.PhaseWaitingForUser { exporter.Metrics.SwitchoverRequired.Set(1) } else { @@ -778,7 +840,7 @@ func (r *InstanceReconciler) reconcileMetrics( exporter.Metrics.SyncReplicas.WithLabelValues("min").Set(float64(cluster.Spec.MinSyncReplicas)) exporter.Metrics.SyncReplicas.WithLabelValues("max").Set(float64(cluster.Spec.MaxSyncReplicas)) - syncReplicas := replication.GetExpectedSyncReplicasNumber(cluster) + syncReplicas := replication.GetExpectedSyncReplicasNumber(ctx, cluster) exporter.Metrics.SyncReplicas.WithLabelValues("expected").Set(float64(syncReplicas)) if cluster.IsReplica() { @@ -814,7 +876,7 @@ func (r *InstanceReconciler) reconcileMonitoringQueries( var configMap corev1.ConfigMap err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: reference.Name}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: reference.Name}, &configMap) if err != nil { contextLogger.Warning("Unable to get configMap containing custom monitoring queries", @@ -841,7 +903,12 @@ func (r *InstanceReconciler) reconcileMonitoringQueries( for _, reference := range cluster.Spec.Monitoring.CustomQueriesSecret { var secret corev1.Secret - err := r.GetClient().Get(ctx, client.ObjectKey{Namespace: r.instance.Namespace, Name: reference.Name}, &secret) + err := r.GetClient().Get(ctx, + client.ObjectKey{ + Namespace: r.instance.GetNamespaceName(), + Name: reference.Name, + }, + &secret) if err != nil { contextLogger.Warning("Unable to get secret containing custom monitoring queries", "reference", reference, @@ -868,74 +935,6 @@ func (r *InstanceReconciler) reconcileMonitoringQueries( r.metricsServerExporter.SetCustomQueries(queriesCollector) } -// RefreshSecrets is called when the PostgreSQL secrets are changed -// and will refresh the contents of the file inside the Pod, without -// reloading the actual PostgreSQL instance. -// -// It returns a boolean flag telling if something changed. Usually -// the invoker will check that flag and reload the PostgreSQL -// instance it is up. -// -// This function manages its own errors by logging them, so the -// user cannot easily tell if the operation has been done completely. -// The rationale behind this is: -// -// 1. when invoked at the startup of the instance manager, PostgreSQL -// is not up. If this raise an error, then PostgreSQL won't -// be able to start correctly (TLS certs are missing, i.e.), -// making no difference between returning an error or not -// -// 2. when invoked inside the reconciliation loop, if the operation -// raise an error, it's pointless to retry. The only way to recover -// from such an error is wait for the CNPG operator to refresh the -// resource version of the secrets to be used, and in that case a -// reconciliation loop will be started again. 
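The rationale above explains why the removed RefreshSecrets (next hunk) logged and swallowed per-secret errors instead of returning them; the patch replaces it with r.certificateReconciler.RefreshSecrets, which does propagate failures. A condensed, self-contained sketch of the old log-and-continue aggregation (the helper shapes are hypothetical):

```go
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// refreshFunc is the shape shared by the removed per-secret helpers: each
// reports whether files on disk changed, plus an error.
type refreshFunc func(ctx context.Context) (bool, error)

// refreshAll condenses the removed aggregation: NotFound is silently
// ignored, other errors are logged (printed here) and swallowed, and the
// per-secret "changed" flags are OR-ed into one "reload PostgreSQL" hint.
func refreshAll(ctx context.Context, fns ...refreshFunc) bool {
	changed := false
	for _, fn := range fns {
		c, err := fn(ctx)
		switch {
		case err == nil:
			changed = changed || c
		case !apierrors.IsNotFound(err):
			fmt.Println("error while refreshing secret:", err)
		}
	}
	return changed
}

func main() {
	notFound := apierrors.NewNotFound(
		schema.GroupResource{Resource: "secrets"}, "server-tls")

	changed := refreshAll(context.Background(),
		func(context.Context) (bool, error) { return true, nil },       // refreshed on disk
		func(context.Context) (bool, error) { return false, notFound }, // missing: ignored
	)
	fmt.Println("reload needed:", changed)
}
```

The new call site inverts exactly this choice: a failed refresh now aborts the reconciliation with an error instead of waiting for the operator to bump the secret's resource version.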
-func (r *InstanceReconciler) RefreshSecrets( - ctx context.Context, - cluster *apiv1.Cluster, -) bool { - contextLogger := log.FromContext(ctx) - - changed := false - - serverSecretChanged, err := r.refreshServerCertificateFiles(ctx, cluster) - if err == nil { - changed = changed || serverSecretChanged - } else if !apierrors.IsNotFound(err) { - contextLogger.Error(err, "Error while getting server secret") - } - - replicationSecretChanged, err := r.refreshReplicationUserCertificate(ctx, cluster) - if err == nil { - changed = changed || replicationSecretChanged - } else if !apierrors.IsNotFound(err) { - contextLogger.Error(err, "Error while getting streaming replication secret") - } - - clientCaSecretChanged, err := r.refreshClientCA(ctx, cluster) - if err == nil { - changed = changed || clientCaSecretChanged - } else if !apierrors.IsNotFound(err) { - contextLogger.Error(err, "Error while getting cluster CA Client secret") - } - - serverCaSecretChanged, err := r.refreshServerCA(ctx, cluster) - if err == nil { - changed = changed || serverCaSecretChanged - } else if !apierrors.IsNotFound(err) { - contextLogger.Error(err, "Error while getting cluster CA Server secret") - } - - barmanEndpointCaSecretChanged, err := r.refreshBarmanEndpointCA(ctx, cluster) - if err == nil { - changed = changed || barmanEndpointCaSecretChanged - } else if !apierrors.IsNotFound(err) { - contextLogger.Error(err, "Error while getting barman endpoint CA secret") - } - - return changed -} - // reconcileInstance sets PostgreSQL instance parameters to current values func (r *InstanceReconciler) reconcileInstance(cluster *apiv1.Cluster) { detectRequiresDesignatedPrimaryTransition := func() bool { @@ -960,6 +959,7 @@ func (r *InstanceReconciler) reconcileInstance(cluster *apiv1.Cluster) { r.instance.MaxStopDelay = cluster.GetMaxStopDelay() r.instance.SmartStopDelay = cluster.GetSmartShutdownTimeout() r.instance.RequiresDesignatedPrimaryTransition = detectRequiresDesignatedPrimaryTransition() + r.instance.Cluster = cluster } // PostgreSQLAutoConfWritable reconciles the permissions bit of `postgresql.auto.conf` @@ -992,7 +992,7 @@ func (r *InstanceReconciler) reconcilePostgreSQLAutoConfFilePermissions(ctx cont // The file is created immediately after initdb and removed after the // first WAL is archived func (r *InstanceReconciler) reconcileCheckWalArchiveFile(cluster *apiv1.Cluster) error { - filePath := filepath.Join(r.instance.PgData, postgresManagement.CheckEmptyWalArchiveFile) + filePath := filepath.Join(r.instance.PgData, constants.CheckEmptyWalArchiveFile) for _, condition := range cluster.Status.Conditions { // If our current condition is archiving we can delete the file if condition.Type == string(apiv1.ConditionContinuousArchiving) && condition.Status == metav1.ConditionTrue { @@ -1025,9 +1025,7 @@ func (r *InstanceReconciler) processConfigReloadAndManageRestart(ctx context.Con phaseReason := "PostgreSQL configuration changed" if status.IsPrimary && status.PendingRestartForDecrease { if cluster.GetPrimaryUpdateStrategy() == apiv1.PrimaryUpdateStrategyUnsupervised { - contextLogger.Info("Restarting primary in-place due to hot standby sensible parameters decrease") - restartTimeout := time.Duration(cluster.GetRestartTimeout()) * time.Second - return r.Instance().RequestAndWaitRestartSmartFast(ctx, restartTimeout) + return r.triggerRestartForDecrease(ctx, cluster) } reason := "decrease of hot standby sensitive parameters" contextLogger.Info("Waiting for the user to request a restart of the primary instance or a 
switchover "+ @@ -1048,136 +1046,44 @@ func (r *InstanceReconciler) processConfigReloadAndManageRestart(ctx context.Con return nil } - return clusterstatus.RegisterPhase(ctx, r.client, cluster, phase, phaseReason) -} - -// refreshCertificateFilesFromSecret receive a secret and rewrite the file -// corresponding to the server certificate -func (r *InstanceReconciler) refreshInstanceCertificateFromSecret( - secret *corev1.Secret, -) error { - certData, ok := secret.Data[corev1.TLSCertKey] - if !ok { - return fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey) - } - - keyData, ok := secret.Data[corev1.TLSPrivateKeyKey] - if !ok { - return fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey) - } - - certificate, err := tls.X509KeyPair(certData, keyData) - if err != nil { - return fmt.Errorf("failed decoding Secret: %w", err) - } - - r.instance.ServerCertificate = &certificate - - return err -} - -// refreshCertificateFilesFromSecret receive a secret and rewrite the file -// corresponding to the server certificate -func (r *InstanceReconciler) refreshCertificateFilesFromSecret( - ctx context.Context, - secret *corev1.Secret, - certificateLocation string, - privateKeyLocation string, -) (bool, error) { - contextLogger := log.FromContext(ctx) - - certificate, ok := secret.Data[corev1.TLSCertKey] - if !ok { - return false, fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey) - } - - privateKey, ok := secret.Data[corev1.TLSPrivateKeyKey] - if !ok { - return false, fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey) - } - - certificateIsChanged, err := fileutils.WriteFileAtomic(certificateLocation, certificate, 0o600) - if err != nil { - return false, fmt.Errorf("while writing server certificate: %w", err) - } - - if certificateIsChanged { - contextLogger.Info("Refreshed configuration file", - "filename", certificateLocation, - "secret", secret.Name) - } - - privateKeyIsChanged, err := fileutils.WriteFileAtomic(privateKeyLocation, privateKey, 0o600) - if err != nil { - return false, fmt.Errorf("while writing server private key: %w", err) - } - - if privateKeyIsChanged { - contextLogger.Info("Refreshed configuration file", - "filename", privateKeyLocation, - "secret", secret.Name) - } - - return certificateIsChanged || privateKeyIsChanged, nil -} - -// refreshCAFromSecret receive a secret and rewrite the ca.crt file to the provided location -func (r *InstanceReconciler) refreshCAFromSecret( - ctx context.Context, - secret *corev1.Secret, - destLocation string, -) (bool, error) { - caCertificate, ok := secret.Data[certs.CACertKey] - if !ok { - return false, fmt.Errorf("missing %s entry in Secret", certs.CACertKey) - } - - changed, err := fileutils.WriteFileAtomic(destLocation, caCertificate, 0o600) - if err != nil { - return false, fmt.Errorf("while writing server certificate: %w", err) - } - - if changed { - log.FromContext(ctx).Info("Refreshed configuration file", - "filename", destLocation, - "secret", secret.Name) - } - - return changed, nil + return clusterstatus.PatchWithOptimisticLock( + ctx, + r.client, + cluster, + clusterstatus.SetPhase(phase, phaseReason), + clusterstatus.SetClusterReadyCondition, + ) } -// refreshFileFromSecret receive a secret and rewrite the file corresponding to the key to the provided location -func (r *InstanceReconciler) refreshFileFromSecret( - ctx context.Context, - secret *corev1.Secret, - key, destLocation string, -) (bool, error) { +// triggerRestartForDecrease triggers an in-place restart and then asks +// the operator 
to continue with the reconciliation. This is needed to +// apply a change in replica-sensitive parameters that must be applied +// on the primary node first and, after that, to the replicas +func (r *InstanceReconciler) triggerRestartForDecrease(ctx context.Context, cluster *apiv1.Cluster) error { contextLogger := log.FromContext(ctx) - data, ok := secret.Data[key] - if !ok { - return false, fmt.Errorf("missing %s entry in Secret", key) - } - changed, err := fileutils.WriteFileAtomic(destLocation, data, 0o600) - if err != nil { - return false, fmt.Errorf("while writing file: %w", err) + contextLogger.Info("Restarting primary in-place due to hot standby sensitive parameters decrease") + if err := r.Instance().RequestAndWaitRestartSmartFast(ctx, cluster.GetRestartTimeout()); err != nil { + return err } - if changed { - contextLogger.Info("Refreshed configuration file", - "filename", destLocation, - "secret", secret.Name, - "key", key) - } + phase := apiv1.PhaseApplyingConfiguration + phaseReason := "Decrease of hot standby sensitive parameters" - return changed, nil + return clusterstatus.PatchWithOptimisticLock( + ctx, + r.client, + cluster, + clusterstatus.SetPhase(phase, phaseReason), + clusterstatus.SetClusterReadyCondition, + ) } // Reconciler primary logic. DB needed. func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv1.Cluster) error { contextLogger := log.FromContext(ctx) - if cluster.Status.TargetPrimary != r.instance.PodName || cluster.IsReplica() { + if cluster.Status.TargetPrimary != r.instance.GetPodName() || cluster.IsReplica() { return nil } @@ -1206,9 +1112,9 @@ } // if the currentPrimary doesn't match the PodName we set the correct value. - if cluster.Status.CurrentPrimary != r.instance.PodName { - cluster.Status.CurrentPrimary = r.instance.PodName - cluster.Status.CurrentPrimaryTimestamp = pkgUtils.GetCurrentTimestamp() + if cluster.Status.CurrentPrimary != r.instance.GetPodName() { + cluster.Status.CurrentPrimary = r.instance.GetPodName() + cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() if err := r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)); err != nil { return err @@ -1238,10 +1144,10 @@ func (r *InstanceReconciler) handlePromotion(ctx context.Context, cluster *apiv1.Cluster) error { contextLogger := log.FromContext(ctx) contextLogger.Info("I'm the target primary, wait for the wal_receiver to be terminated") - if r.instance.PodName != cluster.Status.CurrentPrimary { + if r.instance.GetPodName() != cluster.Status.CurrentPrimary { // if the cluster is not replicating it means it's doing a failover and // we have to wait for wal receivers to be down - err := r.waitForWalReceiverDown() + err := r.waitForWalReceiverDown(ctx) if err != nil { return err } @@ -1262,7 +1168,7 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary( cluster *apiv1.Cluster, ) (changed bool, err error) { // If I'm already the current designated primary everything is ok.
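In the hunk that follows, reconcileDesignatedPrimary swaps a one-shot MergeFrom status patch for a retry.RetryOnConflict loop that re-reads the cluster on each attempt. A self-contained sketch of how that helper behaves (the group/resource and object names are illustrative):

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0

	// RetryOnConflict re-runs the closure for as long as it returns a
	// Conflict error, i.e. what an optimistic-concurrency update produces
	// when someone else bumped resourceVersion between Get and Update.
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		attempts++
		if attempts < 3 {
			// simulate a stale resourceVersion on the first two tries
			return apierrors.NewConflict(
				schema.GroupResource{Group: "postgresql.cnpg.io", Resource: "clusters"},
				"cluster-example",
				fmt.Errorf("the object has been modified"),
			)
		}
		// the real closure re-Gets the cluster, DeepCopies it, mutates
		// status, and calls Status().Update on the freshly read object
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```

The closure has to rebuild its update from a fresh Get every time; reusing the stale object would only produce another conflict.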
- if cluster.Status.CurrentPrimary == r.instance.PodName && !r.instance.RequiresDesignatedPrimaryTransition { + if cluster.Status.CurrentPrimary == r.instance.GetPodName() && !r.instance.RequiresDesignatedPrimaryTransition { return false, nil } @@ -1275,18 +1181,32 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary( // I'm the primary, need to inform the operator log.FromContext(ctx).Info("Setting myself as the current designated primary") - oldCluster := cluster.DeepCopy() - cluster.Status.CurrentPrimary = r.instance.PodName - cluster.Status.CurrentPrimaryTimestamp = pkgUtils.GetCurrentTimestamp() - if r.instance.RequiresDesignatedPrimaryTransition { - externalcluster.SetDesignatedPrimaryTransitionCompleted(cluster) - } - return changed, r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)) + return changed, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var livingCluster apiv1.Cluster + + err := r.client.Get(ctx, client.ObjectKeyFromObject(cluster), &livingCluster) + if err != nil { + return err + } + + updatedCluster := livingCluster.DeepCopy() + updatedCluster.Status.CurrentPrimary = r.instance.GetPodName() + updatedCluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() + if r.instance.RequiresDesignatedPrimaryTransition { + externalcluster.SetDesignatedPrimaryTransitionCompleted(updatedCluster) + } + + cluster.Status = updatedCluster.Status + + return r.client.Status().Update(ctx, updatedCluster) + }) } // waitForWalReceiverDown wait until the wal receiver is down, and it's used // to grab all the WAL files from a replica -func (r *InstanceReconciler) waitForWalReceiverDown() error { +func (r *InstanceReconciler) waitForWalReceiverDown(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + // This is not really exponential backoff as RetryUntilWalReceiverDown // doesn't contain any increment return wait.ExponentialBackoff(RetryUntilWalReceiverDown, func() (done bool, err error) { @@ -1299,7 +1219,7 @@ func (r *InstanceReconciler) waitForWalReceiverDown() error { return true, nil } - log.Info("WAL receiver is still active, waiting") + contextLogger.Info("WAL receiver is still active, waiting") return false, nil }) } @@ -1350,7 +1270,7 @@ func (r *InstanceReconciler) reconcileUser(ctx context.Context, username string, var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: secretName}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: secretName}, &secret) if err != nil { if apierrors.IsNotFound(err) { @@ -1393,7 +1313,7 @@ func (r *InstanceReconciler) refreshPGHBA(ctx context.Context, cluster *apiv1.Cl err := r.GetClient().Get(ctx, types.NamespacedName{ Name: ldapSecretName, - Namespace: r.instance.Namespace, + Namespace: r.instance.GetNamespaceName(), }, &ldapBindPasswordSecret) if err != nil { return false, err @@ -1406,10 +1326,15 @@ func (r *InstanceReconciler) refreshPGHBA(ctx context.Context, cluster *apiv1.Cl ldapBindPassword = string(ldapBindPasswordByte) } // Generate pg_hba.conf file - return r.instance.RefreshPGHBA(cluster, ldapBindPassword) + return r.instance.RefreshPGHBA(ctx, cluster, ldapBindPassword) } -func (r *InstanceReconciler) shouldRequeueForMissingTopology(cluster *apiv1.Cluster) shoudRequeue { +func (r *InstanceReconciler) shouldRequeueForMissingTopology( + ctx context.Context, + cluster *apiv1.Cluster, +) shoudRequeue { + contextLogger := log.FromContext(ctx) + syncReplicaConstraint := 
cluster.Spec.PostgresConfiguration.SyncReplicaElectionConstraint if !syncReplicaConstraint.Enabled { return false @@ -1420,7 +1345,7 @@ func (r *InstanceReconciler) shouldRequeueForMissingTopology(cluster *apiv1.Clus topologyStatus := cluster.Status.Topology if !topologyStatus.SuccessfullyExtracted || len(topologyStatus.Instances) != cluster.Spec.Instances { - log.Info("missing topology information while syncReplicaElectionConstraint are enabled, " + + contextLogger.Info("missing topology information while syncReplicaElectionConstraint are enabled, " + "will requeue to calculate correctly the synchronous names") return true } @@ -1454,7 +1379,7 @@ func (r *InstanceReconciler) dropStaleReplicationConnections( return ctrl.Result{}, nil } - if cluster.Status.CurrentPrimary == r.instance.PodName { + if cluster.Status.CurrentPrimary == r.instance.GetPodName() { return ctrl.Result{}, nil } @@ -1465,8 +1390,8 @@ func (r *InstanceReconciler) dropStaleReplicationConnections( result, err := conn.ExecContext( ctx, - `SELECT pg_terminate_backend(pid) - FROM pg_stat_replication + `SELECT pg_catalog.pg_terminate_backend(pid) + FROM pg_catalog.pg_stat_replication WHERE application_name LIKE $1`, fmt.Sprintf("%v-%%", cluster.Name), ) diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index 1bc852613d..1b434d95c1 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,156 +13,30 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller import ( "context" + "errors" "fmt" - "io/fs" "os" "path/filepath" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/util/retry" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" - postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - pkgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) -// refreshServerCertificateFiles gets the latest server certificates files from the -// secrets, and may set the instance certificate if it was missing our outdated. 
-// Returns true if configuration has been changed or the instance has been updated -func (r *InstanceReconciler) refreshServerCertificateFiles(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { - contextLogger := log.FromContext(ctx) - - var secret corev1.Secret - - err := retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, - func() error { - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ServerTLSSecret}, - &secret) - if err != nil { - contextLogger.Info("Error accessing server TLS Certificate. Retrying with exponential backoff.", - "secret", cluster.Status.Certificates.ServerTLSSecret) - return err - } - return nil - }) - if err != nil { - return false, err - } - - changed, err := r.refreshCertificateFilesFromSecret( - ctx, - &secret, - postgresSpec.ServerCertificateLocation, - postgresSpec.ServerKeyLocation) - if err != nil { - return changed, err - } - - if r.instance.ServerCertificate == nil || changed { - return changed, r.refreshInstanceCertificateFromSecret(&secret) - } - - return changed, nil -} - -// refreshReplicationUserCertificate gets the latest replication certificates from the -// secrets. Returns true if configuration has been changed -func (r *InstanceReconciler) refreshReplicationUserCertificate( - ctx context.Context, - cluster *apiv1.Cluster, -) (bool, error) { - var secret corev1.Secret - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ReplicationTLSSecret}, - &secret) - if err != nil { - return false, err - } - - return r.refreshCertificateFilesFromSecret( - ctx, - &secret, - postgresSpec.StreamingReplicaCertificateLocation, - postgresSpec.StreamingReplicaKeyLocation) -} - -// refreshClientCA gets the latest client CA certificates from the secrets. -// It returns true if configuration has been changed -func (r *InstanceReconciler) refreshClientCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { - var secret corev1.Secret - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ClientCASecret}, - &secret) - if err != nil { - return false, err - } - - return r.refreshCAFromSecret(ctx, &secret, postgresSpec.ClientCACertificateLocation) -} - -// refreshServerCA gets the latest server CA certificates from the secrets. -// It returns true if configuration has been changed -func (r *InstanceReconciler) refreshServerCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { - var secret corev1.Secret - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ServerCASecret}, - &secret) - if err != nil { - return false, err - } - - return r.refreshCAFromSecret(ctx, &secret, postgresSpec.ServerCACertificateLocation) -} - -// refreshBarmanEndpointCA gets the latest barman endpoint CA certificates from the secrets. 
-// It returns true if configuration has been changed -func (r *InstanceReconciler) refreshBarmanEndpointCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { - endpointCAs := map[string]*apiv1.SecretKeySelector{} - if cluster.Spec.Backup.IsBarmanEndpointCASet() { - endpointCAs[postgresSpec.BarmanBackupEndpointCACertificateLocation] = cluster.Spec.Backup.BarmanObjectStore.EndpointCA - } - if replicaBarmanCA := cluster.GetBarmanEndpointCAForReplicaCluster(); replicaBarmanCA != nil { - endpointCAs[postgresSpec.BarmanRestoreEndpointCACertificateLocation] = replicaBarmanCA - } - if len(endpointCAs) == 0 { - return false, nil - } - - var changed bool - for target, secretKeySelector := range endpointCAs { - var secret corev1.Secret - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: secretKeySelector.Name}, - &secret) - if err != nil { - return false, err - } - c, err := r.refreshFileFromSecret(ctx, &secret, secretKeySelector.Key, target) - changed = changed || c - if err != nil { - return changed, err - } - } - return changed, nil -} - // verifyPgDataCoherenceForPrimary will abort the execution if the current server is a primary // one from the PGDATA viewpoint, but is not classified as the target nor the // current primary @@ -194,7 +69,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context "of the cluster is resumed, demoting immediately") return r.instance.Demote(ctx, cluster) - case targetPrimary == r.instance.PodName: + case targetPrimary == r.instance.GetPodName(): if currentPrimary == "" { // This means that this cluster has been just started up and the // current primary still need to be written @@ -203,8 +78,8 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context "targetPrimary", targetPrimary) oldCluster := cluster.DeepCopy() - cluster.Status.CurrentPrimary = r.instance.PodName - cluster.Status.CurrentPrimaryTimestamp = pkgUtils.GetCurrentTimestamp() + cluster.Status.CurrentPrimary = r.instance.GetPodName() + cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() return r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)) } return nil @@ -236,11 +111,6 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context return err } - pgMajorVersion, err := cluster.GetPostgresqlMajorVersion() - if err != nil { - return err - } - // Clean up any stale pid file before executing pg_rewind err = r.instance.CleanUpStalePid() if err != nil { @@ -256,31 +126,20 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context err, "Error while changing mode of the postgresql.auto.conf file before pg_rewind, skipped") } - // pg_rewind could require a clean shutdown of the old primary to - // work. Unfortunately, if the old primary is already clean starting - // it up may make it advance in respect to the new one. - // The only way to check if we really need to start it up before - // invoking pg_rewind is to try using pg_rewind and, on failures, - // retrying after having started up the instance. - err = r.instance.Rewind(ctx, pgMajorVersion) - if err != nil { - contextLogger.Info( - "pg_rewind failed, starting the server to complete the crash recovery", - "err", err) - - // pg_rewind requires a clean shutdown of the old primary to work. - // The only way to do that is to start the server again - // and wait for it to be available again. 
-			err = r.instance.CompleteCrashRecovery(ctx)
-			if err != nil {
-				return err
+		// We archive every WAL that has not been archived since the latest postmaster invocation.
+		if err := archiver.ArchiveAllReadyWALs(ctx, cluster, r.instance.PgData); err != nil {
+			var missingPluginError archiver.ErrMissingWALArchiverPlugin
+			if errors.As(err, &missingPluginError) {
+				// The instance initialization resulted in a fatal error.
+				// We need the Pod to be rolled out to install the archiving plugin.
+				r.systemInitialization.BroadcastError(err)
 			}
+			return fmt.Errorf("while ensuring all WAL files are archived: %w", err)
+		}
 
-			// Then let's go back to the point of the new primary
-			err = r.instance.Rewind(ctx, pgMajorVersion)
-			if err != nil {
-				return err
-			}
+		err = r.instance.Rewind(ctx)
+		if err != nil {
+			return fmt.Errorf("while executing pg_rewind: %w", err)
 		}
 
 		// Now I can demote myself
@@ -288,47 +147,6 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context
 	}
 }
 
-// ReconcileWalStorage moves the files from PGDATA/pg_wal to the volume attached, if exists, and
-// creates a symlink for it
-func (r *InstanceReconciler) ReconcileWalStorage(ctx context.Context) error {
-	contextLogger := log.FromContext(ctx)
-
-	if pgWalExists, err := fileutils.FileExists(specs.PgWalVolumePath); err != nil {
-		return err
-	} else if !pgWalExists {
-		return nil
-	}
-
-	pgWalDirInfo, err := os.Lstat(specs.PgWalPath)
-	if err != nil {
-		return err
-	}
-	// The pgWalDir it's already a symlink meaning that there's nothing to do
-	mode := pgWalDirInfo.Mode() & fs.ModeSymlink
-	if !pgWalDirInfo.IsDir() && mode != 0 {
-		return nil
-	}
-
-	// We discarded every possibility that this has been done, let's move the current file to their
-	// new location
-	contextLogger.Info("Moving data", "from", specs.PgWalPath, "to", specs.PgWalVolumePgWalPath)
-	if err := fileutils.MoveDirectoryContent(specs.PgWalPath, specs.PgWalVolumePgWalPath); err != nil {
-		contextLogger.Error(err, "Moving data", "from", specs.PgWalPath, "to",
-			specs.PgWalVolumePgWalPath)
-		return err
-	}
-
-	contextLogger.Debug("Deleting old path", "path", specs.PgWalPath)
-	if err := fileutils.RemoveFile(specs.PgWalPath); err != nil {
-		contextLogger.Error(err, "Deleting old path", "path", specs.PgWalPath)
-		return err
-	}
-
-	// We moved all the files now we should create the proper symlink
-	contextLogger.Debug("Creating symlink", "from", specs.PgWalPath, "to", specs.PgWalVolumePgWalPath)
-	return os.Symlink(specs.PgWalVolumePgWalPath, specs.PgWalPath)
-}
-
 // ReconcileTablespaces ensures the mount points created for the tablespaces
 // are there, and creates a subdirectory in each of them, which will therefore
 // be owned by the `postgres` user (rather than `root` as the mount point),
@@ -349,12 +167,12 @@ func (r *InstanceReconciler) ReconcileTablespaces(
 		mountPoint := specs.MountForTablespace(tbsName)
 		if tbsMount, err := fileutils.FileExists(mountPoint); err != nil {
 			contextLogger.Error(err, "while checking for mountpoint", "instance",
-				r.instance.PodName, "tablespace", tbsName)
+				r.instance.GetPodName(), "tablespace", tbsName)
 			return err
 		} else if !tbsMount {
 			contextLogger.Error(fmt.Errorf("mountpoint not found"),
 				"mountpoint for tablespaces is missing",
-				"instance", r.instance.PodName, "tablespace", tbsName)
+				"instance", r.instance.GetPodName(), "tablespace", tbsName)
 			continue
 		}
@@ -369,7 +187,7 @@ func (r *InstanceReconciler) ReconcileTablespaces(
 		if err != nil {
 			contextLogger.Error(err, "could not create data dir in tablespace
mount", - "instance", r.instance.PodName, "tablespace", tbsName) + "instance", r.instance.GetPodName(), "tablespace", tbsName) return fmt.Errorf("while creating data dir in tablespace %s: %w", mountPoint, err) } } diff --git a/internal/management/controller/instance_sync.go b/internal/management/controller/instance_sync.go new file mode 100644 index 0000000000..6d9809fc5a --- /dev/null +++ b/internal/management/controller/instance_sync.go @@ -0,0 +1,123 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + + "github.com/cloudnative-pg/machinery/pkg/log" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// resetFailoverQuorumObject resets the content of the sync quorum object +// to prevent unsafe failovers when we are changing the configuration +func (r *InstanceReconciler) resetFailoverQuorumObject(ctx context.Context, cluster *apiv1.Cluster) error { + if !r.shouldManageFailoverQuorumObject(ctx, cluster) { + return nil + } + + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var livingQuorumStatus apiv1.FailoverQuorum + + err := r.client.Get(ctx, client.ObjectKeyFromObject(cluster), &livingQuorumStatus) + if err != nil { + return err + } + + livingQuorumStatus.Status = apiv1.FailoverQuorumStatus{} + return r.client.Status().Update(ctx, &livingQuorumStatus) + }) +} + +// updateFailoverQuorumObject updates the sync quorum object reading the +// current synchronous replica metadata from the PG instance +func (r *InstanceReconciler) updateFailoverQuorumObject(ctx context.Context, cluster *apiv1.Cluster) error { + if !r.shouldManageFailoverQuorumObject(ctx, cluster) { + return nil + } + + metadata, err := r.Instance().GetSynchronousReplicationMetadata(ctx) + if err != nil { + return err + } + + newStatus := apiv1.FailoverQuorumStatus{} + if metadata != nil { + newStatus.Method = metadata.Method + newStatus.Primary = r.instance.GetPodName() + newStatus.StandbyNumber = metadata.NumSync + + // We ensure the primary is not included in the standby names + newStatus.StandbyNames = make([]string, 0, len(metadata.StandbyNames)) + for _, name := range metadata.StandbyNames { + if name == newStatus.Primary { + continue + } + newStatus.StandbyNames = append(newStatus.StandbyNames, name) + } + } + + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var livingQuorumStatus apiv1.FailoverQuorum + + err := r.client.Get(ctx, client.ObjectKeyFromObject(cluster), &livingQuorumStatus) + if err != nil { + return err + } + + if equality.Semantic.DeepEqual(livingQuorumStatus.Status, newStatus) { + return nil + } + + updatedQuorumStatus := livingQuorumStatus.DeepCopy() + updatedQuorumStatus.Status = newStatus + return r.client.Status().Update(ctx, updatedQuorumStatus) + }) +} + 
+func (r *InstanceReconciler) shouldManageFailoverQuorumObject(ctx context.Context, cluster *apiv1.Cluster) bool { + contextLogger := log.FromContext(ctx) + + if cluster.Status.TargetPrimary != r.instance.GetPodName() { + return false + } + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + return false + } + if cluster.Spec.PostgresConfiguration.Synchronous == nil { + return false + } + + failoverQuorumActive, err := cluster.IsFailoverQuorumActive() + if err != nil { + contextLogger.Error(err, "Failed to determine if sync quorum is active") + failoverQuorumActive = false + } + + if !failoverQuorumActive { + return false + } + + return true +} diff --git a/internal/management/controller/instance_token.go b/internal/management/controller/instance_token.go index f1f74f52a8..dbc94e90ed 100644 --- a/internal/management/controller/instance_token.go +++ b/internal/management/controller/instance_token.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -72,9 +75,9 @@ func (r *InstanceReconciler) verifyPromotionToken(cluster *apiv1.Cluster) error } parsedControlData := utils.ParsePgControldataOutput(out) - currentTimelineIDString := parsedControlData[utils.PgControlDataKeyLatestCheckpointTimelineID] - currentSystemIdentifier := parsedControlData[utils.PgControlDataKeyDatabaseSystemIdentifier] - replayLSNString := parsedControlData[utils.PgControlDataKeyLatestCheckpointREDOLocation] + currentTimelineIDString := parsedControlData.GetLatestCheckpointTimelineID() + currentSystemIdentifier := parsedControlData.GetDatabaseSystemIdentifier() + replayLSNString := parsedControlData.GetLatestCheckpointREDOLocation() return promotiontoken.ValidateAgainstInstanceStatus(promotionToken, currentSystemIdentifier, currentTimelineIDString, replayLSNString) diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go index 9ea68245ae..76929fef62 100644 --- a/internal/management/controller/manager.go +++ b/internal/management/controller/manager.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the functions in PostgreSQL instance manager @@ -28,9 +31,11 @@ import ( ctrl "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/concurrency" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" + instancecertificate "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/certificate" ) // InstanceReconciler reconciles the status of the Cluster resource with @@ -46,6 +51,9 @@ type InstanceReconciler struct { systemInitialization *concurrency.Executed firstReconcileDone atomic.Bool metricsServerExporter *metricserver.Exporter + + certificateReconciler *instancecertificate.Reconciler + pluginRepository repository.Interface } // NewInstanceReconciler creates a new instance reconciler @@ -53,6 +61,7 @@ func NewInstanceReconciler( instance *postgres.Instance, client ctrl.Client, metricsExporter *metricserver.Exporter, + pluginRepository repository.Interface, ) *InstanceReconciler { return &InstanceReconciler{ instance: instance, @@ -61,6 +70,8 @@ func NewInstanceReconciler( extensionStatus: make(map[string]bool), systemInitialization: concurrency.NewExecuted(), metricsServerExporter: metricsExporter, + certificateReconciler: instancecertificate.NewReconciler(client, instance), + pluginRepository: pluginRepository, } } @@ -82,18 +93,7 @@ func (r *InstanceReconciler) Instance() *postgres.Instance { // GetCluster gets the managed cluster through the client func (r *InstanceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) { - var cluster apiv1.Cluster - err := r.GetClient().Get(ctx, - types.NamespacedName{ - Namespace: r.instance.Namespace, - Name: r.instance.ClusterName, - }, - &cluster) - if err != nil { - return nil, err - } - - return &cluster, nil + return getClusterFromInstance(ctx, r.client, r.instance) } // GetSecret will get a named secret in the instance namespace @@ -102,7 +102,7 @@ func (r *InstanceReconciler) GetSecret(ctx context.Context, name string) (*corev err := r.GetClient().Get(ctx, types.NamespacedName{ Name: name, - Namespace: r.instance.Namespace, + Namespace: r.instance.GetNamespaceName(), }, &secret) if err != nil { return nil, fmt.Errorf("while getting secret: %w", err) diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go new file mode 100644 index 0000000000..1b32ef99b4 --- /dev/null +++ b/internal/management/controller/publication_controller.go @@ -0,0 +1,202 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package controller
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"time"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// PublicationReconciler reconciles a Publication object
+type PublicationReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+
+	instance            *postgres.Instance
+	finalizerReconciler *finalizerReconciler[*apiv1.Publication]
+	getDB               func(name string) (*sql.DB, error)
+}
+
+// publicationReconciliationInterval is the time to wait before requeueing
+// a publication for another reconciliation attempt, e.g. after a failure
+const publicationReconciliationInterval = 30 * time.Second
+
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=publications,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=publications/status,verbs=get;update;patch
+
+// Reconcile is part of the main Kubernetes reconciliation loop, which aims to
+// move the current state of the cluster closer to the desired state.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile
+func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	contextLogger := log.FromContext(ctx).
+		WithName("publication_reconciler").
+		WithValues("publicationName", req.Name)
+
+	// Get the publication object
+	var publication apiv1.Publication
+	if err := r.Get(ctx, client.ObjectKey{
+		Namespace: req.Namespace,
+		Name:      req.Name,
+	}, &publication); err != nil {
+		contextLogger.Trace("Could not fetch Publication", "error", err)
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	// This is not for me!
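+	// Skip publications belonging to a different Cluster than the one
+	// this instance manager is attached to.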
+ if publication.Spec.ClusterRef.Name != r.instance.GetClusterName() { + contextLogger.Trace("Publication is not for this cluster", + "cluster", publication.Spec.ClusterRef.Name, + "expected", r.instance.GetClusterName(), + ) + return ctrl.Result{}, nil + } + + // If everything is reconciled, we're done here + if publication.Generation == publication.Status.ObservedGeneration { + return ctrl.Result{}, nil + } + + // Fetch the Cluster from the cache + cluster, err := r.GetCluster(ctx) + if err != nil { + return ctrl.Result{}, markAsFailed(ctx, r.Client, &publication, fmt.Errorf("while fetching the cluster: %w", err)) + } + + // Still not for me, we're waiting for a switchover + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil + } + + // This is not for me, at least now + if cluster.Status.CurrentPrimary != r.instance.GetPodName() { + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil + } + + contextLogger.Info("Reconciling publication") + defer func() { + contextLogger.Info("Reconciliation loop of publication exited") + }() + + // Cannot do anything on a replica cluster + if cluster.IsReplica() { + if err := markAsUnknown(ctx, r.Client, &publication, errClusterIsReplica); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil + } + + if res, err := detectConflictingManagers(ctx, r.Client, &publication, &apiv1.PublicationList{}); err != nil || + !res.IsZero() { + return res, err + } + + if err := r.finalizerReconciler.reconcile(ctx, &publication); err != nil { + return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err) + } + if !publication.GetDeletionTimestamp().IsZero() { + return ctrl.Result{}, nil + } + + if err := r.alignPublication(ctx, &publication); err != nil { + contextLogger.Error(err, "while reconciling publication") + if markErr := markAsFailed(ctx, r.Client, &publication, err); markErr != nil { + contextLogger.Error(err, "while marking as failed the publication resource", + "error", err, + "markError", markErr, + ) + return ctrl.Result{}, fmt.Errorf( + "encountered an error while marking as failed the publication resource: %w, original error: %w", + markErr, + err) + } + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil + } + + contextLogger.Info("Reconciliation of publication completed") + if err := markAsReady(ctx, r.Client, &publication); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil +} + +func (r *PublicationReconciler) evaluateDropPublication(ctx context.Context, pub *apiv1.Publication) error { + if pub.Spec.ReclaimPolicy != apiv1.PublicationReclaimDelete { + return nil + } + db, err := r.getDB(pub.Spec.DBName) + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) + } + + return executeDropPublication(ctx, db, pub.Spec.Name) +} + +// NewPublicationReconciler creates a new publication reconciler +func NewPublicationReconciler( + mgr manager.Manager, + instance *postgres.Instance, +) *PublicationReconciler { + pr := &PublicationReconciler{ + Client: mgr.GetClient(), + instance: instance, + getDB: func(name string) (*sql.DB, error) { + return instance.ConnectionPool().Connection(name) + }, + } + + pr.finalizerReconciler = newFinalizerReconciler( + mgr.GetClient(), + utils.PublicationFinalizerName, + pr.evaluateDropPublication, + ) + + return pr +} + +// 
SetupWithManager sets up the controller with the Manager. +func (r *PublicationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&apiv1.Publication{}). + Named("instance-publication"). + Complete(r) +} + +// GetCluster gets the managed cluster through the client +func (r *PublicationReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) { + return getClusterFromInstance(ctx, r.Client, r.instance) +} diff --git a/internal/management/controller/publication_controller_sql.go b/internal/management/controller/publication_controller_sql.go new file mode 100644 index 0000000000..99c182c329 --- /dev/null +++ b/internal/management/controller/publication_controller_sql.go @@ -0,0 +1,196 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/jackc/pgx/v5" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +func (r *PublicationReconciler) alignPublication(ctx context.Context, obj *apiv1.Publication) error { + db, err := r.getDB(obj.Spec.DBName) + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) + } + + row := db.QueryRowContext( + ctx, + ` + SELECT count(*) + FROM pg_catalog.pg_publication + WHERE pubname = $1 + `, + obj.Spec.Name) + if row.Err() != nil { + return fmt.Errorf("while getting publication status: %w", row.Err()) + } + + var count int + if err := row.Scan(&count); err != nil { + return fmt.Errorf("while getting publication status (scan): %w", err) + } + + if count > 0 { + if err := r.patchPublication(ctx, db, obj); err != nil { + return fmt.Errorf("while patching publication: %w", err) + } + return nil + } + + if err := r.createPublication(ctx, db, obj); err != nil { + return fmt.Errorf("while creating publication: %w", err) + } + + return nil +} + +func (r *PublicationReconciler) patchPublication( + ctx context.Context, + db *sql.DB, + obj *apiv1.Publication, +) error { + sqls := toPublicationAlterSQL(obj) + for _, sqlQuery := range sqls { + if _, err := db.ExecContext(ctx, sqlQuery); err != nil { + return err + } + } + + return nil +} + +func (r *PublicationReconciler) createPublication( + ctx context.Context, + db *sql.DB, + obj *apiv1.Publication, +) error { + sqlQuery := toPublicationCreateSQL(obj) + _, err := db.ExecContext(ctx, sqlQuery) + return err +} + +func toPublicationCreateSQL(obj *apiv1.Publication) string { + createQuery := fmt.Sprintf( + "CREATE PUBLICATION %s %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + toPublicationTargetSQL(&obj.Spec.Target), + ) + if len(obj.Spec.Parameters) > 0 { + createQuery = fmt.Sprintf("%s WITH (%s)", createQuery, toPostgresParameters(obj.Spec.Parameters)) + } + + return createQuery +} + +func toPublicationAlterSQL(obj *apiv1.Publication) []string { + result := make([]string, 0, 2) + + if len(obj.Spec.Target.Objects) 
> 0 { + result = append(result, + fmt.Sprintf( + "ALTER PUBLICATION %s SET %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + toPublicationTargetObjectsSQL(&obj.Spec.Target), + ), + ) + } + + if len(obj.Spec.Parameters) > 0 { + result = append(result, + fmt.Sprintf( + "ALTER PUBLICATION %s SET (%s)", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + toPostgresParameters(obj.Spec.Parameters), + ), + ) + } + + return result +} + +func executeDropPublication(ctx context.Context, db *sql.DB, name string) error { + if _, err := db.ExecContext( + ctx, + fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{name}.Sanitize()), + ); err != nil { + return fmt.Errorf("while dropping publication: %w", err) + } + + return nil +} + +func toPublicationTargetSQL(obj *apiv1.PublicationTarget) string { + if obj.AllTables { + return "FOR ALL TABLES" + } + + result := toPublicationTargetObjectsSQL(obj) + if len(result) > 0 { + result = fmt.Sprintf("FOR %s", result) + } + return result +} + +func toPublicationTargetObjectsSQL(obj *apiv1.PublicationTarget) string { + result := "" + for _, object := range obj.Objects { + if len(result) > 0 { + result += ", " + } + result += toPublicationObjectSQL(&object) + } + + return result +} + +func toPublicationObjectSQL(obj *apiv1.PublicationTargetObject) string { + if len(obj.TablesInSchema) > 0 { + return fmt.Sprintf("TABLES IN SCHEMA %s", pgx.Identifier{obj.TablesInSchema}.Sanitize()) + } + + result := strings.Builder{} + result.WriteString("TABLE ") + + if obj.Table.Only { + result.WriteString("ONLY ") + } + + if len(obj.Table.Schema) > 0 { + result.WriteString(fmt.Sprintf("%s.", pgx.Identifier{obj.Table.Schema}.Sanitize())) + } + + result.WriteString(pgx.Identifier{obj.Table.Name}.Sanitize()) + + if len(obj.Table.Columns) > 0 { + sanitizedColumns := make([]string, 0, len(obj.Table.Columns)) + for _, column := range obj.Table.Columns { + sanitizedColumns = append(sanitizedColumns, pgx.Identifier{column}.Sanitize()) + } + result.WriteString(fmt.Sprintf(" (%s)", strings.Join(sanitizedColumns, ", "))) + } + + return result.String() +} diff --git a/internal/management/controller/publication_controller_sql_test.go b/internal/management/controller/publication_controller_sql_test.go new file mode 100644 index 0000000000..d7635e9080 --- /dev/null +++ b/internal/management/controller/publication_controller_sql_test.go @@ -0,0 +1,228 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// nolint: dupl +package controller + +import ( + "database/sql" + "fmt" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/jackc/pgx/v5" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("publication sql", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + ) + + BeforeEach(func() { + var err error + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + It("drops the publication successfully", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{"publication_name"}.Sanitize())). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := executeDropPublication(ctx, db, "publication_name") + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns an error when dropping the publication fails", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", + pgx.Identifier{"publication_name"}.Sanitize())). + WillReturnError(fmt.Errorf("drop publication error")) + + err := executeDropPublication(ctx, db, "publication_name") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("while dropping publication: drop publication error")) + }) + + It("sanitizes the publication name correctly", func(ctx SpecContext) { + dbMock.ExpectExec( + fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{"sanitized_name"}.Sanitize())). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := executeDropPublication(ctx, db, "sanitized_name") + Expect(err).ToNot(HaveOccurred()) + }) + + It("generates correct SQL for altering publication with target objects", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Target: apiv1.PublicationTarget{ + Objects: []apiv1.PublicationTargetObject{ + {TablesInSchema: "public"}, + }, + }, + }, + } + + sqls := toPublicationAlterSQL(obj) + Expect(sqls).To(ContainElement(`ALTER PUBLICATION "test_pub" SET TABLES IN SCHEMA "public"`)) + }) + + It("generates correct SQL for altering publication with parameters", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Parameters: map[string]string{ + "param1": "value1", + "param2": "value2", + }, + }, + } + + sqls := toPublicationAlterSQL(obj) + Expect(sqls).To(ContainElement(`ALTER PUBLICATION "test_pub" SET ("param1" = 'value1', "param2" = 'value2')`)) + }) + + It("returns empty SQL list when no alterations are needed", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + }, + } + + sqls := toPublicationAlterSQL(obj) + Expect(sqls).To(BeEmpty()) + }) + + It("generates correct SQL for creating publication with target schema", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Target: apiv1.PublicationTarget{ + Objects: []apiv1.PublicationTargetObject{ + {TablesInSchema: "public"}, + }, + }, + }, + } + + sql := toPublicationCreateSQL(obj) + Expect(sql).To(Equal(`CREATE PUBLICATION "test_pub" FOR TABLES IN SCHEMA "public"`)) + }) + + It("generates correct SQL for creating publication with target table", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Target: apiv1.PublicationTarget{ + Objects: []apiv1.PublicationTargetObject{ + {Table: &apiv1.PublicationTargetTable{Name: "table", Schema: "test", Columns: []string{"a", "b"}}}, + }, + }, + }, + } + + sql := toPublicationCreateSQL(obj) + Expect(sql).To(Equal(`CREATE PUBLICATION "test_pub" FOR TABLE "test"."table" ("a", "b")`)) + }) + + It("generates correct SQL for creating 
publication with parameters", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Parameters: map[string]string{ + "param1": "value1", + "param2": "value2", + }, + Target: apiv1.PublicationTarget{ + Objects: []apiv1.PublicationTargetObject{ + {TablesInSchema: "public"}, + }, + }, + }, + } + + sql := toPublicationCreateSQL(obj) + Expect(sql).To(Equal( + `CREATE PUBLICATION "test_pub" FOR TABLES IN SCHEMA "public" WITH ("param1" = 'value1', "param2" = 'value2')`, + )) + }) +}) + +var _ = Describe("toPublicationObjectSQL", func() { + It("returns correct SQL for tables in schema", func() { + obj := &apiv1.PublicationTargetObject{ + TablesInSchema: "public", + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLES IN SCHEMA "public"`)) + }) + + It("returns correct SQL for table with schema and columns", func() { + obj := &apiv1.PublicationTargetObject{ + Table: &apiv1.PublicationTargetTable{ + Name: "table", + Schema: "test", + Columns: []string{"a", "b"}, + }, + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLE "test"."table" ("a", "b")`)) + }) + + It("returns correct SQL for table with only clause", func() { + obj := &apiv1.PublicationTargetObject{ + Table: &apiv1.PublicationTargetTable{ + Name: "table", + Only: true, + }, + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLE ONLY "table"`)) + }) + + It("returns correct SQL for table without schema and columns", func() { + obj := &apiv1.PublicationTargetObject{ + Table: &apiv1.PublicationTargetTable{ + Name: "table", + }, + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLE "table"`)) + }) + + It("returns correct SQL for table with schema but without columns", func() { + obj := &apiv1.PublicationTargetObject{ + Table: &apiv1.PublicationTargetTable{ + Name: "table", + Schema: "test", + }, + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLE "test"."table"`)) + }) +}) diff --git a/internal/management/controller/publication_controller_test.go b/internal/management/controller/publication_controller_test.go new file mode 100644 index 0000000000..4991ba2a19 --- /dev/null +++ b/internal/management/controller/publication_controller_test.go @@ -0,0 +1,371 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/jackc/pgx/v5" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +const publicationDetectionQuery = `SELECT count(*) + FROM pg_catalog.pg_publication + WHERE pubname = $1` + +var _ = Describe("Managed publication controller tests", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + publication *apiv1.Publication + cluster *apiv1.Cluster + r *PublicationReconciler + fakeClient client.Client + err error + ) + + BeforeEach(func() { + cluster = &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Status: apiv1.ClusterStatus{ + CurrentPrimary: "cluster-example-1", + TargetPrimary: "cluster-example-1", + }, + } + publication = &apiv1.Publication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pub-one", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.PublicationSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + ReclaimPolicy: apiv1.PublicationReclaimDelete, + Name: "pub-all", + DBName: "app", + Target: apiv1.PublicationTarget{ + AllTables: true, + Objects: []apiv1.PublicationTargetObject{ + {TablesInSchema: "public"}, + }, + }, + }, + } + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-example-1"). + WithClusterName("cluster-example") + + fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithObjects(cluster, publication). + WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Publication{}). + Build() + + r = &PublicationReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: pgInstance, + getDB: func(_ string) (*sql.DB, error) { + return db, nil + }, + } + r.finalizerReconciler = newFinalizerReconciler( + fakeClient, + utils.PublicationFinalizerName, + r.evaluateDropPublication, + ) + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + It("adds finalizer and sets status ready on success", func(ctx SpecContext) { + noHits := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name). 
+ WillReturnRows(noHits) + + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE PUBLICATION %s FOR ALL TABLES", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + Expect(publication.Status.Applied).Should(HaveValue(BeTrue())) + Expect(publication.GetStatusMessage()).Should(BeEmpty()) + Expect(publication.GetFinalizers()).NotTo(BeEmpty()) + }) + + It("publication object inherits error after patching", func(ctx SpecContext) { + expectedError := fmt.Errorf("no permission") + oneHit := sqlmock.NewRows([]string{""}).AddRow("1") + dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name). + WillReturnRows(oneHit) + + expectedQuery := fmt.Sprintf("ALTER PUBLICATION %s SET TABLES IN SCHEMA \"public\"", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + Expect(publication.Status.Applied).Should(HaveValue(BeFalse())) + Expect(publication.Status.Message).Should(ContainSubstring(expectedError.Error())) + }) + + When("reclaim policy is delete", func() { + It("on deletion it removes finalizers and drops the Publication", func(ctx SpecContext) { + // Mocking Detect publication + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name). + WillReturnRows(expectedValue) + + // Mocking Create publication + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE PUBLICATION %s FOR ALL TABLES", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + // Mocking Drop Publication + expectedDrop := fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1)) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(publication.GetFinalizers()).NotTo(BeEmpty()) + Expect(publication.Status.Applied).Should(HaveValue(BeTrue())) + Expect(publication.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. 
+			// See fake.Client known issues with `Generation`
+			// https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+			publication.SetGeneration(publication.GetGeneration() + 1)
+			Expect(fakeClient.Update(ctx, publication)).To(Succeed())
+
+			// We now look at the behavior when we delete the Publication object
+			Expect(fakeClient.Delete(ctx, publication)).To(Succeed())
+
+			err = reconcilePublication(ctx, fakeClient, r, publication)
+			Expect(err).To(HaveOccurred())
+			Expect(apierrors.IsNotFound(err)).To(BeTrue())
+		})
+	})
+
+	When("reclaim policy is retain", func() {
+		It("on deletion it removes finalizers and does NOT drop the Publication", func(ctx SpecContext) {
+			publication.Spec.ReclaimPolicy = apiv1.PublicationReclaimRetain
+			Expect(fakeClient.Update(ctx, publication)).To(Succeed())
+
+			// Mocking Detect publication
+			expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
+			dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name).
+				WillReturnRows(expectedValue)
+
+			// Mocking Create publication
+			expectedCreate := sqlmock.NewResult(0, 1)
+			expectedQuery := fmt.Sprintf(
+				"CREATE PUBLICATION %s FOR ALL TABLES",
+				pgx.Identifier{publication.Spec.Name}.Sanitize(),
+			)
+			dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+			err := reconcilePublication(ctx, fakeClient, r, publication)
+			Expect(err).ToNot(HaveOccurred())
+
+			// Plain successful reconciliation, finalizers have been created
+			Expect(publication.GetFinalizers()).NotTo(BeEmpty())
+			Expect(publication.Status.Applied).Should(HaveValue(BeTrue()))
+			Expect(publication.Status.Message).Should(BeEmpty())
+
+			// The next 2 lines are a hacky bit to make sure the next reconciler
+			// call doesn't skip on account of Generation == ObservedGeneration.
+			// See fake.Client known issues with `Generation`
+			// https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+			publication.SetGeneration(publication.GetGeneration() + 1)
+			Expect(fakeClient.Update(ctx, publication)).To(Succeed())
+
+			// We now look at the behavior when we delete the Publication object
+			Expect(fakeClient.Delete(ctx, publication)).To(Succeed())
+
+			err = reconcilePublication(ctx, fakeClient, r, publication)
+			Expect(err).To(HaveOccurred())
+			Expect(apierrors.IsNotFound(err)).To(BeTrue())
+		})
+	})
+
+	It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) {
+		// Since the fakeClient has the `cluster-example` cluster, let's reference
+		// another cluster `cluster-other` that is not found by the fakeClient
+		pgInstance := postgres.NewInstance().
+			WithNamespace("default").
+			WithPodName("cluster-other-1").
+ WithClusterName("cluster-other") + + r = &PublicationReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: pgInstance, + getDB: func(_ string) (*sql.DB, error) { + return db, nil + }, + } + + // Updating the publication object to reference the newly created Cluster + publication.Spec.ClusterRef.Name = "cluster-other" + Expect(fakeClient.Update(ctx, publication)).To(Succeed()) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + Expect(publication.Status.Applied).Should(HaveValue(BeFalse())) + Expect(publication.GetStatusMessage()).Should(ContainSubstring( + fmt.Sprintf("%q not found", publication.Spec.ClusterRef.Name))) + }) + + It("skips reconciliation if publication object isn't found (deleted publication)", func(ctx SpecContext) { + // Initialize a new Publication but without creating it in the K8S Cluster + otherPublication := &apiv1.Publication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pub-other", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.PublicationSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "pub-all", + }, + } + + // Reconcile the publication that hasn't been created in the K8S Cluster + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: otherPublication.Namespace, + Name: otherPublication.Name, + }}) + + // Expect the reconciler to exit silently, since the object doesn't exist + Expect(err).ToNot(HaveOccurred()) + Expect(result).Should(BeZero()) + }) + + It("marks as failed if the target publication is already being managed", func(ctx SpecContext) { + // Let's force the publication to have a past reconciliation + publication.Status.ObservedGeneration = 2 + Expect(fakeClient.Status().Update(ctx, publication)).To(Succeed()) + + // A new Publication Object targeting the same "pub-all" + pubDuplicate := &apiv1.Publication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pub-duplicate", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.PublicationSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "pub-all", + }, + } + + // Expect(fakeClient.Create(ctx, currentManager)).To(Succeed()) + Expect(fakeClient.Create(ctx, pubDuplicate)).To(Succeed()) + + err := reconcilePublication(ctx, fakeClient, r, pubDuplicate) + Expect(err).ToNot(HaveOccurred()) + + expectedError := fmt.Sprintf("%q is already managed by object %q", + pubDuplicate.Spec.Name, publication.Name) + Expect(pubDuplicate.Status.Applied).To(HaveValue(BeFalse())) + Expect(pubDuplicate.Status.Message).To(ContainSubstring(expectedError)) + Expect(pubDuplicate.Status.ObservedGeneration).To(BeZero()) + }) + + It("properly signals a publication is on a replica cluster", func(ctx SpecContext) { + initialCluster := cluster.DeepCopy() + cluster.Spec.ReplicaCluster = &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + } + Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed()) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + Expect(publication.Status.Applied).Should(BeNil()) + Expect(publication.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary")) + }) +}) + +func reconcilePublication( + ctx context.Context, + fakeClient client.Client, + r *PublicationReconciler, + publication *apiv1.Publication, +) error { + GinkgoT().Helper() + _, err := r.Reconcile(ctx, 
ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: publication.GetNamespace(), + Name: publication.GetName(), + }}) + Expect(err).ToNot(HaveOccurred()) + return fakeClient.Get(ctx, client.ObjectKey{ + Namespace: publication.GetNamespace(), + Name: publication.GetName(), + }, publication) +} diff --git a/internal/management/controller/roles/contract.go b/internal/management/controller/roles/contract.go index 294023c4e5..932ac2826e 100644 --- a/internal/management/controller/roles/contract.go +++ b/internal/management/controller/roles/contract.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package roles import ( - "context" "database/sql" "reflect" "sort" @@ -121,25 +123,3 @@ func (d *DatabaseRole) isEquivalentTo(inSpec apiv1.RoleConfiguration) bool { return reflect.DeepEqual(role, spec) && d.hasSameValidUntilAs(inSpec) } - -// RoleManager abstracts the functionality of reconciling with PostgreSQL roles -type RoleManager interface { - // List the roles in the database - List(ctx context.Context) ([]DatabaseRole, error) - // Update the role in the database - Update(ctx context.Context, role DatabaseRole) error - // Create the role in the database - Create(ctx context.Context, role DatabaseRole) error - // Delete the role in the database - Delete(ctx context.Context, role DatabaseRole) error - // GetLastTransactionID returns the last TransactionID as the `xmin` - // from the database - // See https://www.postgresql.org/docs/current/datatype-oid.html for reference - GetLastTransactionID(ctx context.Context, role DatabaseRole) (int64, error) - // UpdateComment Update the comment of role in the database - UpdateComment(ctx context.Context, role DatabaseRole) error - // UpdateMembership Update the In Role membership of role in the database - UpdateMembership(ctx context.Context, role DatabaseRole, rolesToGrant []string, rolesToRevoke []string) error - // GetParentRoles returns the roles the given role is a member of - GetParentRoles(ctx context.Context, role DatabaseRole) ([]string, error) -} diff --git a/internal/management/controller/roles/contract_test.go b/internal/management/controller/roles/contract_test.go index 10c6e5e1a9..3bdac7506d 100644 --- a/internal/management/controller/roles/contract_test.go +++ b/internal/management/controller/roles/contract_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package roles diff --git a/internal/management/controller/roles/doc.go b/internal/management/controller/roles/doc.go index d2e69fec67..1d302ba207 100644 --- a/internal/management/controller/roles/doc.go +++ b/internal/management/controller/roles/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package roles contains the code needed to reconcile roles with PostgreSQL diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go index 7ef2ea2ff0..82b3b986b6 100644 --- a/internal/management/controller/roles/postgres.go +++ b/internal/management/controller/roles/postgres.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package roles @@ -19,6 +22,7 @@ package roles import ( "context" "database/sql" + "errors" "fmt" "strings" @@ -28,26 +32,14 @@ import ( "github.com/lib/pq" ) -// PostgresRoleManager is a RoleManager for a database instance -type PostgresRoleManager struct { - superUserDB *sql.DB -} - -// NewPostgresRoleManager returns an implementation of RoleManager for postgres -func NewPostgresRoleManager(superDB *sql.DB) RoleManager { - return PostgresRoleManager{ - superUserDB: superDB, - } -} - // List the available roles excluding all the roles that start with `pg_` -func (sm PostgresRoleManager) List( - ctx context.Context, -) ([]DatabaseRole, error) { +func List(ctx context.Context, db *sql.DB) ([]DatabaseRole, error) { logger := log.FromContext(ctx).WithName("roles_reconciler") - wrapErr := func(err error) error { return fmt.Errorf("while listing DB roles for DRM: %w", err) } + wrapErr := func(err error) error { + return fmt.Errorf("while listing DB roles for role reconciler: %w", err) + } - rows, err := sm.superUserDB.QueryContext( + rows, err := db.QueryContext( ctx, `SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls, @@ -55,8 +47,8 @@ func (sm PostgresRoleManager) List( mem.inroles FROM pg_catalog.pg_authid as auth LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member + SELECT pg_catalog.array_agg(pg_catalog.pg_get_userbyid(roleid)) as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname not like 'pg\_%'`) if err != nil { @@ -109,11 +101,11 @@ func (sm PostgresRoleManager) List( } // Update the role -func (sm 
PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) error { +func Update(ctx context.Context, db *sql.DB, role DatabaseRole) error { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while updating role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while updating role %s with role reconciler: %w", role.Name, err) } var query strings.Builder @@ -124,7 +116,7 @@ func (sm PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) err // will change no matter what, the next reconciliation cycle we would update the password appendPasswordOption(role, &query) - _, err := sm.superUserDB.ExecContext(ctx, query.String()) + _, err := db.ExecContext(ctx, query.String()) if err != nil { return wrapErr(err) } @@ -133,11 +125,11 @@ func (sm PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) err // Create the role // TODO: do we give the role any database-level permissions? -func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) error { +func Create(ctx context.Context, db *sql.DB, role DatabaseRole) error { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while creating role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while creating role %s with role reconciler: %w", role.Name, err) } var query strings.Builder @@ -150,7 +142,7 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err // NOTE: defensively we might think of doing CREATE ... IF EXISTS // but at least during development, we want to catch the error // Even after, this may be "the kubernetes way" - if _, err := sm.superUserDB.ExecContext(ctx, query.String()); err != nil { + if _, err := db.ExecContext(ctx, query.String()); err != nil { return wrapErr(err) } @@ -159,7 +151,7 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err query.WriteString(fmt.Sprintf("COMMENT ON ROLE %s IS %s", pgx.Identifier{role.Name}.Sanitize(), pq.QuoteLiteral(role.Comment))) - if _, err := sm.superUserDB.ExecContext(ctx, query.String()); err != nil { + if _, err := db.ExecContext(ctx, query.String()); err != nil { return wrapErr(err) } } @@ -168,16 +160,16 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err } // Delete the role -func (sm PostgresRoleManager) Delete(ctx context.Context, role DatabaseRole) error { +func Delete(ctx context.Context, db *sql.DB, role DatabaseRole) error { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while deleting role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while deleting role %s with role reconciler: %w", role.Name, err) } query := fmt.Sprintf("DROP ROLE %s", pgx.Identifier{role.Name}.Sanitize()) contextLog.Debug("Dropping", "query", query) - _, err := sm.superUserDB.ExecContext(ctx, query) + _, err := db.ExecContext(ctx, query) if err != nil { return wrapErr(err) } @@ -187,18 +179,18 @@ func (sm PostgresRoleManager) Delete(ctx context.Context, role DatabaseRole) err // GetLastTransactionID get the last xmin for the role, to help keep track of // whether the role has been changed in on the Database since last reconciliation -func (sm PostgresRoleManager) GetLastTransactionID(ctx context.Context, role DatabaseRole) 
(int64, error) { +func GetLastTransactionID(ctx context.Context, db *sql.DB, role DatabaseRole) (int64, error) { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while getting last xmin for role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while getting last xmin for role %s with role reconciler: %w", role.Name, err) } var xmin int64 - err := sm.superUserDB.QueryRowContext(ctx, + err := db.QueryRowContext(ctx, `SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1`, role.Name).Scan(&xmin) - if err == sql.ErrNoRows { + if errors.Is(err, sql.ErrNoRows) { return 0, wrapErr(err) } if err != nil { @@ -209,17 +201,17 @@ func (sm PostgresRoleManager) GetLastTransactionID(ctx context.Context, role Dat } // UpdateComment of the role -func (sm PostgresRoleManager) UpdateComment(ctx context.Context, role DatabaseRole) error { +func UpdateComment(ctx context.Context, db *sql.DB, role DatabaseRole) error { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while updating comment for role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while updating comment for role %s with role reconciler: %w", role.Name, err) } query := fmt.Sprintf("COMMENT ON ROLE %s IS %s", pgx.Identifier{role.Name}.Sanitize(), pq.QuoteLiteral(role.Comment)) contextLog.Debug("Updating comment", "query", query) - _, err := sm.superUserDB.ExecContext(ctx, query) + _, err := db.ExecContext(ctx, query) if err != nil { return wrapErr(err) } @@ -232,8 +224,9 @@ func (sm PostgresRoleManager) UpdateComment(ctx context.Context, role DatabaseRo // IMPORTANT: the various REVOKE and GRANT commands that may be required to // reconcile the role will be done in a single transaction. 
So, if any one // of them fails, the role will not get updated -func (sm PostgresRoleManager) UpdateMembership( +func UpdateMembership( ctx context.Context, + db *sql.DB, role DatabaseRole, rolesToGrant []string, rolesToRevoke []string, @@ -241,7 +234,7 @@ func (sm PostgresRoleManager) UpdateMembership( contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while updating memberships for role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while updating memberships for role %s with role reconciler: %w", role.Name, err) } if len(rolesToRevoke)+len(rolesToGrant) == 0 { contextLog.Debug("No membership change query to execute for role") @@ -261,20 +254,20 @@ func (sm PostgresRoleManager) UpdateMembership( ) } - tx, err := sm.superUserDB.BeginTx(ctx, nil) + tx, err := db.BeginTx(ctx, nil) if err != nil { return wrapErr(err) } defer func() { rollbackErr := tx.Rollback() - if rollbackErr != nil && rollbackErr != sql.ErrTxDone { + if rollbackErr != nil && !errors.Is(rollbackErr, sql.ErrTxDone) { contextLog.Error(rollbackErr, "rolling back transaction") } }() for _, sqlQuery := range queries { contextLog.Debug("Executing query", "sqlQuery", sqlQuery) - if _, err := sm.superUserDB.ExecContext(ctx, sqlQuery); err != nil { + if _, err := tx.ExecContext(ctx, sqlQuery); err != nil { contextLog.Error(err, "executing query", "sqlQuery", sqlQuery, "err", err) return wrapErr(err) } @@ -283,26 +276,23 @@ func (sm PostgresRoleManager) UpdateMembership( } // GetParentRoles get the in roles of this role -func (sm PostgresRoleManager) GetParentRoles( - ctx context.Context, - role DatabaseRole, -) ([]string, error) { +func GetParentRoles(ctx context.Context, db *sql.DB, role DatabaseRole) ([]string, error) { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while getting parents for role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while getting parents for role %s with role reconciler: %w", role.Name, err) } query := `SELECT mem.inroles FROM pg_catalog.pg_authid as auth LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member + SELECT pg_catalog.array_agg(pg_catalog.pg_get_userbyid(roleid)) as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname = $1` contextLog.Debug("get parent role", "query", query) var parentRoles pq.StringArray - err := sm.superUserDB.QueryRowContext(ctx, query, role.Name).Scan(&parentRoles) - if err == sql.ErrNoRows { + err := db.QueryRowContext(ctx, query, role.Name).Scan(&parentRoles) + if errors.Is(err, sql.ErrNoRows) { return nil, wrapErr(err) } if err != nil { @@ -314,7 +304,13 @@ func (sm PostgresRoleManager) GetParentRoles( func appendInRoleOptions(role DatabaseRole, query *strings.Builder) { if len(role.InRoles) > 0 { - query.WriteString(fmt.Sprintf(" IN ROLE %s ", strings.Join(role.InRoles, ","))) + quotedInRoles := make([]string, len(role.InRoles)) + + for i, inRole := range role.InRoles { + quotedInRoles[i] = pgx.Identifier{inRole}.Sanitize() + } + + fmt.Fprintf(query, " IN ROLE %s ", strings.Join(quotedInRoles, ",")) } } @@ -361,12 +357,10 @@ func appendRoleOptions(role DatabaseRole, query *strings.Builder) { query.WriteString(" NOSUPERUSER") } - query.WriteString(fmt.Sprintf(" CONNECTION LIMIT %d", role.ConnectionLimit)) + 
fmt.Fprintf(query, " CONNECTION LIMIT %d", role.ConnectionLimit) } -func appendPasswordOption(role DatabaseRole, - query *strings.Builder, -) { +func appendPasswordOption(role DatabaseRole, query *strings.Builder) { switch { case role.ignorePassword: // Postgres may allow to set the VALID UNTIL of a role independently of @@ -375,7 +369,7 @@ func appendPasswordOption(role DatabaseRole, case !role.password.Valid: query.WriteString(" PASSWORD NULL") default: - query.WriteString(fmt.Sprintf(" PASSWORD %s", pq.QuoteLiteral(role.password.String))) + fmt.Fprintf(query, " PASSWORD %s", pq.QuoteLiteral(role.password.String)) } if role.ValidUntil.Valid { @@ -385,6 +379,6 @@ func appendPasswordOption(role DatabaseRole, } else { value = role.ValidUntil.InfinityModifier.String() } - query.WriteString(fmt.Sprintf(" VALID UNTIL %s", pq.QuoteLiteral(value))) + fmt.Fprintf(query, " VALID UNTIL %s", pq.QuoteLiteral(value)) } } diff --git a/internal/management/controller/roles/postgres_errors.go b/internal/management/controller/roles/postgres_errors.go index 92a0111ba5..587a46997e 100644 --- a/internal/management/controller/roles/postgres_errors.go +++ b/internal/management/controller/roles/postgres_errors.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package roles diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go index d003af03f3..6a4c1fa34e 100644 --- a/internal/management/controller/roles/postgres_test.go +++ b/internal/management/controller/roles/postgres_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
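The `fmt.Fprintf(query, ...)` rewrites above work because `*strings.Builder` implements `io.Writer`: the formatted text lands directly in the builder, avoiding the intermediate string that `query.WriteString(fmt.Sprintf(...))` allocated on every call. A minimal sketch of the pattern; the role name and connection limit here are illustrative, not taken from the patch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	var query strings.Builder
	query.WriteString(`ALTER ROLE "app"`)

	// *strings.Builder satisfies io.Writer, so Fprintf formats straight
	// into the builder instead of building a throwaway string first --
	// the reason the patch prefers it over WriteString(fmt.Sprintf(...)).
	fmt.Fprintf(&query, " CONNECTION LIMIT %d", 10)

	fmt.Println(query.String()) // ALTER ROLE "app" CONNECTION LIMIT 10
}
```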
+ +SPDX-License-Identifier: Apache-2.0 */ package roles @@ -104,22 +107,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() { } wantedRoleExpectedCrtStmt := fmt.Sprintf( "CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+ - "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring VALID UNTIL '2100-01-01 00:00:00Z'", + "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" VALID UNTIL '2100-01-01 00:00:00Z'", wantedRole.Name) wantedRoleWithPassExpectedCrtStmt := fmt.Sprintf( "CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+ - "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD 'myPassword' VALID UNTIL '2100-01-01 00:00:00Z'", + "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD 'myPassword' VALID UNTIL '2100-01-01 00:00:00Z'", wantedRole.Name) wantedRoleWithoutValidUntilExpectedCrtStmt := fmt.Sprintf( "CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+ - "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD 'myPassword'", + "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD 'myPassword'", wantedRole.Name) wantedRoleWithPassDeletionExpectedCrtStmt := fmt.Sprintf( "CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+ - "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD NULL VALID UNTIL '2100-01-01 00:00:00Z'", + "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD NULL VALID UNTIL '2100-01-01 00:00:00Z'", wantedRole.Name) wantedRoleWithDefaultConnectionLimitExpectedCrtStmt := fmt.Sprintf( "CREATE ROLE \"%s\" NOBYPASSRLS NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION "+ @@ -127,37 +130,18 @@ var _ = Describe("Postgres RoleManager implementation test", func() { wantedRoleWithDefaultConnectionLimit.Name) wantedRoleCommentStmt := fmt.Sprintf( - "COMMENT ON ROLE \"%s\" IS %s", + wantedRoleCommentTpl, wantedRole.Name, pq.QuoteLiteral(wantedRole.Comment)) wantedRoleExpectedAltStmt := fmt.Sprintf( "ALTER ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 2 ", wantedRole.Name) unWantedRoleExpectedDelStmt := fmt.Sprintf("DROP ROLE \"%s\"", unWantedRole.Name) - expectedSelStmt := `SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, - rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls, - pg_catalog.shobj_description(auth.oid, 'pg_authid') as comment, auth.xmin, - mem.inroles - FROM pg_catalog.pg_authid as auth - LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member - ) mem ON member = oid - WHERE rolname not like 'pg\_%'` - - expectedMembershipStmt := `SELECT mem.inroles - FROM pg_catalog.pg_authid as auth - LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member - ) mem ON member = oid - WHERE rolname = $1` // Testing List It("List can read the list of roles from the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) testDate := time.Date(2023, 4, 4, 0, 0, 0, 0, time.UTC) @@ -182,7 +166,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() { }, false, []byte("This is streaming_replica user"), 22, []byte(`{"role1","role2"}`)) mock.ExpectQuery(expectedSelStmt).WillReturnRows(rows) mock.ExpectExec("CREATE 
ROLE foo").WillReturnResult(sqlmock.NewResult(11, 1)) - roles, err := prm.List(ctx) + roles, err := List(ctx, db) Expect(err).ShouldNot(HaveOccurred()) Expect(roles).To(HaveLen(3)) password1 := sql.NullString{ @@ -231,46 +215,42 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("List returns error if there is a problem with the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectQuery(expectedSelStmt).WillReturnError(dbError) - roles, err := prm.List(ctx) + roles, err := List(ctx, db) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(BeEquivalentTo("while listing DB roles for DRM: Kaboom")) + Expect(err.Error()).To(BeEquivalentTo("while listing DB roles for role reconciler: Kaboom")) Expect(roles).To(BeEmpty()) }) // Testing Create It("Create will send a correct CREATE to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) mock.ExpectExec(wantedRoleCommentStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Create(ctx, internalWantedRole.toDatabaseRole()) + err = Create(ctx, db, internalWantedRole.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("Create will return error if there is a problem creating the role in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectExec(wantedRoleExpectedCrtStmt). WillReturnError(dbError) - err = prm.Create(ctx, internalWantedRole.toDatabaseRole()) + err = Create(ctx, db, internalWantedRole.toDatabaseRole()) Expect(err).To(HaveOccurred()) Expect(errors.Unwrap(err)).To(BeEquivalentTo(dbError)) }) It("Create will send a correct CREATE with password to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleWithPassExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) @@ -281,13 +261,12 @@ var _ = Describe("Postgres RoleManager implementation test", func() { // In this unit test we are not testing the retrieval of secrets, so let's // fetch the password content by hand dbRole.password = sql.NullString{Valid: true, String: "myPassword"} - err = prm.Create(ctx, dbRole) + err = Create(ctx, db, dbRole) Expect(err).ShouldNot(HaveOccurred()) }) It("Create will send a correct CREATE with perpetual password to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleWithoutValidUntilExpectedCrtStmt). 
WillReturnResult(sqlmock.NewResult(2, 3)) @@ -300,32 +279,30 @@ var _ = Describe("Postgres RoleManager implementation test", func() { // In this unit test we are not testing the retrieval of secrets, so let's // fetch the password content by hand dbRole.password = sql.NullString{Valid: true, String: "myPassword"} - err = prm.Create(ctx, dbRole) + err = Create(ctx, db, dbRole) Expect(err).ShouldNot(HaveOccurred()) }) It("Create will send a correct CREATE with password deletion to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleWithPassDeletionExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) mock.ExpectExec(wantedRoleCommentStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Create(ctx, + err = Create(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRoleWithPassDeletion}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("Create will send a correct CREATE with password deletion to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleWithDefaultConnectionLimitExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Create(ctx, + err = Create(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRoleWithDefaultConnectionLimit}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) @@ -333,24 +310,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("Delete will send a correct DROP to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(unWantedRoleExpectedDelStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Delete(ctx, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole()) + err = Delete(ctx, db, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("Delete will return error if there is a problem deleting the role in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectExec(unWantedRoleExpectedDelStmt). WillReturnError(dbError) - err = prm.Delete(ctx, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole()) + err = Delete(ctx, db, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole()) Expect(err).To(HaveOccurred()) coreErr := errors.Unwrap(err) Expect(coreErr).To(BeEquivalentTo(dbError)) @@ -359,23 +334,21 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("Update will send a correct ALTER to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleExpectedAltStmt). 
WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Update(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) + err = Update(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("Update will return error if there is a problem updating the role in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectExec(wantedRoleExpectedAltStmt). WillReturnError(dbError) - err = prm.Update(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) + err = Update(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) Expect(err).To(HaveOccurred()) Expect(errors.Is(err, dbError)).To(BeTrue()) }) @@ -384,24 +357,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("UpdateComment will send a correct COMMENT to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleCommentStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.UpdateComment(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) + err = UpdateComment(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("UpdateComment will return error if there is a problem updating the role in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectExec(wantedRoleCommentStmt). 
WillReturnError(dbError) - err = prm.UpdateComment(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) + err = UpdateComment(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) Expect(err).To(HaveOccurred()) Expect(errors.Is(err, dbError)).To(BeTrue()) }) @@ -409,7 +380,6 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("GetParentRoles will return the roles a given role belongs to", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) rows := sqlmock.NewRows([]string{ "inroles", @@ -417,7 +387,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() { AddRow([]byte(`{"role1","role2"}`)) mock.ExpectQuery(expectedMembershipStmt).WithArgs("foo").WillReturnRows(rows) - roles, err := prm.GetParentRoles(ctx, DatabaseRole{Name: "foo"}) + roles, err := GetParentRoles(ctx, db, DatabaseRole{Name: "foo"}) Expect(err).ShouldNot(HaveOccurred()) Expect(roles).To(HaveLen(2)) Expect(roles).To(ConsistOf("role1", "role2")) @@ -426,10 +396,9 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("GetParentRoles will error if there is a problem querying the database", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectQuery(expectedMembershipStmt).WithArgs("foo").WillReturnError(fmt.Errorf("kaboom")) - roles, err := prm.GetParentRoles(ctx, DatabaseRole{Name: "foo"}) + roles, err := GetParentRoles(ctx, db, DatabaseRole{Name: "foo"}) Expect(err).Should(HaveOccurred()) Expect(roles).To(BeEmpty()) }) @@ -437,7 +406,6 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("UpdateMembership will send correct GRANT and REVOKE statements to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) expectedMembershipExecs := []string{ `GRANT "pg_monitor" TO "foo"`, @@ -454,14 +422,13 @@ var _ = Describe("Postgres RoleManager implementation test", func() { mock.ExpectCommit() - err = prm.UpdateMembership(ctx, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"}) + err = UpdateMembership(ctx, db, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"}) Expect(err).ShouldNot(HaveOccurred()) }) It("UpdateMembership will roll back if there is an error in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) okMembership := `GRANT "pg_monitor" TO "foo"` badMembership := `GRANT "quux" TO "foo"` @@ -474,7 +441,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() { mock.ExpectRollback() - err = prm.UpdateMembership(ctx, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"}) + err = UpdateMembership(ctx, db, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"}) Expect(err).Should(HaveOccurred()) }) @@ -548,26 +515,25 @@ var _ = Describe("Postgres RoleManager implementation test", func() { Expect(queryValidUntil.String()).To(BeEquivalentTo(expectedQueryValidUntil)) }) - It("Getting the proper TransactionID per rol", func() { + It("Getting the proper 
TransactionID per role", func(ctx SpecContext) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) rows := mock.NewRows([]string{"xmin"}) lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1" dbRole := roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole() mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(errors.New("Kaboom")) - _, err = prm.GetLastTransactionID(context.TODO(), dbRole) + _, err = GetLastTransactionID(ctx, db, dbRole) Expect(err).To(HaveOccurred()) mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(sql.ErrNoRows) - _, err = prm.GetLastTransactionID(context.TODO(), dbRole) + _, err = GetLastTransactionID(ctx, db, dbRole) Expect(err).To(HaveOccurred()) rows.AddRow("1321") mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnRows(rows) - transID, err := prm.GetLastTransactionID(context.TODO(), dbRole) + transID, err := GetLastTransactionID(ctx, db, dbRole) Expect(err).ToNot(HaveOccurred()) Expect(transID).To(BeEquivalentTo(1321)) }) diff --git a/internal/management/controller/roles/reconciler.go b/internal/management/controller/roles/reconciler.go index 09190ab23e..a3c34f4518 100644 --- a/internal/management/controller/roles/reconciler.go +++ b/internal/management/controller/roles/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
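The `func(ctx SpecContext)` signatures appearing in the rewritten specs above are Ginkgo v2's per-spec context: it is cancelled when the spec times out or the run is interrupted, so context-aware calls such as `QueryContext` unwind promptly, which the previous `context.TODO()` could never do. A minimal sketch of the pattern, assuming Ginkgo v2 and Gomega inside a bootstrapped suite; the spec body and timeout are illustrative:

```go
package roles_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("SpecContext example", func() {
	// SpecContext implements context.Context; Ginkgo cancels it when the
	// spec times out or the run is interrupted, so anything blocking on
	// ctx (e.g. db.QueryContext) returns instead of hanging the suite.
	It("receives a live per-spec context", func(ctx SpecContext) {
		Expect(ctx.Err()).NotTo(HaveOccurred())
	}, SpecTimeout(10*time.Second))
})
```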
+ +SPDX-License-Identifier: Apache-2.0 */ package roles import ( - "context" - "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -30,17 +31,17 @@ import ( ) var _ = Describe("Role reconciler test", func() { - It("reconcile an empty cluster", func() { + It("reconcile an empty cluster", func(ctx SpecContext) { cluster := &v1.Cluster{} instance := &postgres.Instance{} mockClient := fake.NewClientBuilder().Build() - result, err := Reconcile(context.TODO(), instance, cluster, mockClient) + result, err := Reconcile(ctx, instance, cluster, mockClient) Expect(err).ToNot(HaveOccurred()) Expect(result).To(BeEquivalentTo(reconcile.Result{})) }) - It("reconcile fails with no database connection", func() { + It("reconcile fails with no database connection", func(ctx SpecContext) { instance := &postgres.Instance{} mockClient := fake.NewClientBuilder().Build() cluster := &v1.Cluster{ @@ -55,11 +56,11 @@ var _ = Describe("Role reconciler test", func() { }, }, } - pgStringError := "while listing DB roles for DRM: " + + pgStringError := "while listing DB roles for role reconciler: " + "failed to connect to `user=postgres database=postgres`: " + "/controller/run/.s.PGSQL.5432 (/controller/run): " + "dial error: dial unix /controller/run/.s.PGSQL.5432: connect: no such file or directory" - result, err := Reconcile(context.TODO(), instance, cluster, mockClient) + result, err := Reconcile(ctx, instance, cluster, mockClient) Expect(err.Error()).To(BeEquivalentTo(pgStringError)) Expect(result).To(BeEquivalentTo(reconcile.Result{})) }) diff --git a/internal/management/controller/roles/roles.go b/internal/management/controller/roles/roles.go index 739eaaa158..d9df3f5792 100644 --- a/internal/management/controller/roles/roles.go +++ b/internal/management/controller/roles/roles.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package roles diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go index dce73d51b1..79970c064f 100644 --- a/internal/management/controller/roles/runnable.go +++ b/internal/management/controller/roles/runnable.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
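The refactor running through this patch replaces the `PostgresRoleManager` struct with package-level functions that take the `*sql.DB` explicitly, as in the `List(ctx, db)` call inside `Reconcile` above. One payoff shows up in the rewritten tests further down: a go-sqlmock connection can be injected directly, retiring the hand-written `mockRoleManager`. A minimal sketch of that shape under the same sqlmock API the tests use; `listRoleNames` is an illustrative stand-in, not a function from the patch:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/DATA-DOG/go-sqlmock"
)

// listRoleNames has the refactored shape: a plain function over *sql.DB
// rather than a method on a manager struct that wraps the connection.
func listRoleNames(ctx context.Context, db *sql.DB) ([]string, error) {
	rows, err := db.QueryContext(ctx, "SELECT rolname FROM pg_catalog.pg_authid")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}

func main() {
	// Any *sql.DB will do, so a sqlmock connection plugs in directly.
	db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
	if err != nil {
		panic(err)
	}
	defer db.Close()

	mock.ExpectQuery("SELECT rolname FROM pg_catalog.pg_authid").
		WillReturnRows(sqlmock.NewRows([]string{"rolname"}).AddRow("app"))

	names, err := listRoleNames(context.Background(), db)
	fmt.Println(names, err) // [app] <nil>
}
```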
+ +SPDX-License-Identifier: Apache-2.0 */ package roles @@ -49,12 +52,21 @@ const ( roleUpdateMemberships roleAction = "UPDATE_MEMBERSHIPS" ) +type instanceInterface interface { + GetSuperUserDB() (*sql.DB, error) + IsPrimary() (bool, error) + RoleSynchronizerChan() <-chan *apiv1.ManagedConfiguration + IsReady() error + GetClusterName() string + GetNamespaceName() string +} + // A RoleSynchronizer is a Kubernetes manager.Runnable // that makes sure the Roles in the PostgreSQL databases are in sync with the spec // // c.f. https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#Runnable type RoleSynchronizer struct { - instance *postgres.Instance + instance instanceInterface client client.Client } @@ -125,21 +137,15 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Debug("reconciling managed roles") - if sr.instance.IsServerHealthy() != nil { + if err := sr.instance.IsReady(); err != nil { contextLog.Debug("database not ready, skipping roles reconciling") return nil } - superUserDB, err := sr.instance.GetSuperUserDB() - if err != nil { - return fmt.Errorf("while reconciling managed roles: %w", err) - } - roleManager := NewPostgresRoleManager(superUserDB) - var remoteCluster apiv1.Cluster if err = sr.client.Get(ctx, types.NamespacedName{ - Name: sr.instance.ClusterName, - Namespace: sr.instance.Namespace, + Name: sr.instance.GetClusterName(), + Namespace: sr.instance.GetNamespaceName(), }, &remoteCluster); err != nil { return err } @@ -148,14 +154,18 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed if rolePasswords == nil { rolePasswords = map[string]apiv1.PasswordState{} } - appliedState, irreconcilableRoles, err := sr.synchronizeRoles(ctx, roleManager, config, rolePasswords) + superUserDB, err := sr.instance.GetSuperUserDB() + if err != nil { + return fmt.Errorf("while getting superuser connection: %w", err) + } + appliedState, irreconcilableRoles, err := sr.synchronizeRoles(ctx, superUserDB, config, rolePasswords) if err != nil { return fmt.Errorf("while syncrhonizing managed roles: %w", err) } if err = sr.client.Get(ctx, types.NamespacedName{ - Name: sr.instance.ClusterName, - Namespace: sr.instance.Namespace, + Name: sr.instance.GetClusterName(), + Namespace: sr.instance.GetNamespaceName(), }, &remoteCluster); err != nil { return err } @@ -174,33 +184,33 @@ func getRoleNames(roles []roleConfigurationAdapter) []string { } // synchronizeRoles aligns roles in the database to the spec +// It returns +// - the PasswordState for any updated roles +// - any roles that had expectable postgres errors +// - any unexpected error func (sr *RoleSynchronizer) synchronizeRoles( ctx context.Context, - roleManager RoleManager, + db *sql.DB, config *apiv1.ManagedConfiguration, storedPasswordState map[string]apiv1.PasswordState, ) (map[string]apiv1.PasswordState, map[string][]string, error) { latestSecretResourceVersion, err := getPasswordSecretResourceVersion( - ctx, sr.client, config.Roles, sr.instance.Namespace) + ctx, sr.client, config.Roles, sr.instance.GetNamespaceName()) if err != nil { return nil, nil, err } - rolesInDB, err := roleManager.List(ctx) + rolesInDB, err := List(ctx, db) if err != nil { return nil, nil, err } rolesByAction := evaluateNextRoleActions( ctx, config, rolesInDB, storedPasswordState, latestSecretResourceVersion) + + passwordStates, irreconcilableRoles, err := sr.applyRoleActions(ctx, db, rolesByAction) if err != nil { - return 
nil, nil, fmt.Errorf("while syncrhonizing managed roles: %w", err) + return nil, nil, err } - passwordStates, irreconcilableRoles := sr.applyRoleActions( - ctx, - roleManager, - rolesByAction, - ) - // Merge the status from database into spec. We should keep all the status // otherwise in the next loop the user without status will be marked as need update for role, stateInDatabase := range passwordStates { @@ -213,82 +223,87 @@ func (sr *RoleSynchronizer) synchronizeRoles( // It returns the apiv1.PasswordState for each role, as well as a map of roles that // cannot be reconciled for expectable errors, e.g. dropping a role owning content // -// NOTE: applyRoleActions will not error out if a single role operation fails. -// This is designed so that a role configuration that cannot be honored by PostgreSQL -// cannot stop the reconciliation loop and prevent other roles from being applied +// NOTE: applyRoleActions will carry on after an expectable error, i.e. an error +// due to an invalid request for postgres. This is so that other actions will not +// be blocked by a user error. +// It will, however, error out on unexpected errors. func (sr *RoleSynchronizer) applyRoleActions( ctx context.Context, - roleManager RoleManager, + db *sql.DB, rolesByAction rolesByAction, -) (map[string]apiv1.PasswordState, map[string][]string) { +) (map[string]apiv1.PasswordState, map[string][]string, error) { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Debug("applying role actions") irreconcilableRoles := make(map[string][]string) appliedChanges := make(map[string]apiv1.PasswordState) - handleRoleError := func(errToEvaluate error, roleName string, action roleAction) { + handleRoleError := func(errToEvaluate error, roleName string, action roleAction) error { // log unexpected errors, collect expectable PostgreSQL errors if errToEvaluate == nil { - return + return nil } roleError, err := parseRoleError(errToEvaluate, roleName, action) if err != nil { contextLog.Error(err, "while performing "+string(action), "role", roleName) - return + return err } irreconcilableRoles[roleName] = append(irreconcilableRoles[roleName], roleError.Error()) + return nil } - for action, roles := range rolesByAction { - switch action { - case roleIgnore, roleIsReconciled, roleIsReserved: - contextLog.Debug("no action required", "action", action) - continue + actionsCreateUpdate := []roleAction{roleCreate, roleUpdate} + for _, action := range actionsCreateUpdate { + for _, role := range rolesByAction[action] { + appliedState, err := sr.applyRoleCreateUpdate(ctx, db, role, action) + if err == nil { + appliedChanges[role.Name] = appliedState + } + if unhandledErr := handleRoleError(err, role.Name, action); unhandledErr != nil { + return nil, nil, unhandledErr + } } + } - contextLog.Info("roles in DB out of sync with Spec, evaluating action", - "roles", getRoleNames(roles), "action", action) - - for _, role := range roles { - switch action { - case roleCreate, roleUpdate: - appliedState, err := sr.applyRoleCreateUpdate(ctx, roleManager, role, action) - if err == nil { - appliedChanges[role.Name] = appliedState - } - handleRoleError(err, role.Name, action) - case roleDelete: - err := roleManager.Delete(ctx, role.toDatabaseRole()) - handleRoleError(err, role.Name, action) - case roleSetComment: - // NOTE: adding/updating a comment on a role does not alter its TransactionID - err := roleManager.UpdateComment(ctx, role.toDatabaseRole()) - handleRoleError(err, role.Name, action) - case roleUpdateMemberships: - // 
NOTE: revoking / granting to a role does not alter its TransactionID - dbRole := role.toDatabaseRole() - grants, revokes, err := getRoleMembershipDiff(ctx, roleManager, role, dbRole) - if err != nil { - contextLog.Error(err, "while performing "+string(action), "role", role.Name) - continue - } - err = roleManager.UpdateMembership(ctx, dbRole, grants, revokes) - handleRoleError(err, role.Name, action) - } + for _, role := range rolesByAction[roleSetComment] { + // NOTE: adding/updating a comment on a role does not alter its TransactionID + err := UpdateComment(ctx, db, role.toDatabaseRole()) + if unhandledErr := handleRoleError(err, role.Name, roleSetComment); unhandledErr != nil { + return nil, nil, unhandledErr + } + } + + for _, role := range rolesByAction[roleUpdateMemberships] { + // NOTE: revoking / granting to a role does not alter its TransactionID + dbRole := role.toDatabaseRole() + grants, revokes, err := getRoleMembershipDiff(ctx, db, role, dbRole) + if unhandledErr := handleRoleError(err, role.Name, roleUpdateMemberships); unhandledErr != nil { + return nil, nil, unhandledErr + } + + err = UpdateMembership(ctx, db, dbRole, grants, revokes) + if unhandledErr := handleRoleError(err, role.Name, roleUpdateMemberships); unhandledErr != nil { + return nil, nil, unhandledErr + } + } + + for _, role := range rolesByAction[roleDelete] { + err := Delete(ctx, db, role.toDatabaseRole()) + if unhandledErr := handleRoleError(err, role.Name, roleDelete); unhandledErr != nil { + return nil, nil, unhandledErr } } - return appliedChanges, irreconcilableRoles + return appliedChanges, irreconcilableRoles, nil } func getRoleMembershipDiff( ctx context.Context, - roleManager RoleManager, + db *sql.DB, role roleConfigurationAdapter, dbRole DatabaseRole, ) ([]string, []string, error) { - inRoleInDB, err := roleManager.GetParentRoles(ctx, dbRole) + inRoleInDB, err := GetParentRoles(ctx, db, dbRole) if err != nil { return nil, nil, err } @@ -302,7 +317,7 @@ func getRoleMembershipDiff( // Returns the PasswordState, as well as any error encountered func (sr *RoleSynchronizer) applyRoleCreateUpdate( ctx context.Context, - roleManager RoleManager, + db *sql.DB, role roleConfigurationAdapter, action roleAction, ) (apiv1.PasswordState, error) { @@ -320,7 +335,7 @@ func (sr *RoleSynchronizer) applyRoleCreateUpdate( fmt.Errorf("cannot reconcile: password both provided and disabled: %s", role.PasswordSecret.Name) case role.PasswordSecret != nil && !role.DisablePassword: - passwordSecret, err := getPassword(ctx, sr.client, role, sr.instance.Namespace) + passwordSecret, err := getPassword(ctx, sr.client, role, sr.instance.GetNamespaceName()) if err != nil { return apiv1.PasswordState{}, err } @@ -332,15 +347,15 @@ func (sr *RoleSynchronizer) applyRoleCreateUpdate( var err error switch action { case roleCreate: - err = roleManager.Create(ctx, databaseRole) + err = Create(ctx, db, databaseRole) case roleUpdate: - err = roleManager.Update(ctx, databaseRole) + err = Update(ctx, db, databaseRole) } if err != nil { return apiv1.PasswordState{}, err } - transactionID, err := roleManager.GetLastTransactionID(ctx, databaseRole) + transactionID, err := GetLastTransactionID(ctx, db, databaseRole) if err != nil { return apiv1.PasswordState{}, err } diff --git a/internal/management/controller/roles/runnable_test.go b/internal/management/controller/roles/runnable_test.go index ddd5f2414f..0b57d2d71b 100644 --- a/internal/management/controller/roles/runnable_test.go +++ b/internal/management/controller/roles/runnable_test.go @@ 
-1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,17 +13,25 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package roles import ( "context" + "database/sql" "fmt" + "time" + "github.com/DATA-DOG/go-sqlmock" "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" + "github.com/lib/pq" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -33,243 +42,87 @@ import ( . "github.com/onsi/gomega" ) -type funcCall struct{ verb, roleName string } - -type mockRoleManager struct { - roles map[string]DatabaseRole - callHistory []funcCall -} - -func (m *mockRoleManager) List(_ context.Context) ([]DatabaseRole, error) { - m.callHistory = append(m.callHistory, funcCall{"list", ""}) - re := make([]DatabaseRole, len(m.roles)) - i := 0 - for _, r := range m.roles { - re[i] = r - i++ - } - return re, nil -} - -func (m *mockRoleManager) Update( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"update", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to update unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManager) UpdateComment( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"updateComment", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to update comment of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManager) Create( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"create", role.Name}) - _, found := m.roles[role.Name] - if found { - return fmt.Errorf("tring to create existing role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManager) Delete( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"delete", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to delete unknown role: %s", role.Name) - } - delete(m.roles, role.Name) - return nil -} - -func (m *mockRoleManager) GetLastTransactionID(_ context.Context, _ DatabaseRole) (int64, error) { - return 0, nil -} - -func (m *mockRoleManager) UpdateMembership( - _ context.Context, - role DatabaseRole, - _ []string, - _ []string, -) error { - m.callHistory = append(m.callHistory, funcCall{"updateMembership", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("trying to update Role Members of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManager) GetParentRoles(_ context.Context, role DatabaseRole) ([]string, error) { - m.callHistory = append(m.callHistory, funcCall{"getParentRoles", role.Name}) - _, found := m.roles[role.Name] - if !found { - return nil, fmt.Errorf("trying to get parent of unknown role: 
%s", role.Name) - } - m.roles[role.Name] = role - return nil, nil -} - -// mock.ExpectExec(unWantedRoleExpectedDelStmt). -// WillReturnError(&pgconn.PgError{Code: "2BP01"}) - -type mockRoleManagerWithError struct { - roles map[string]DatabaseRole - callHistory []funcCall -} - -func (m *mockRoleManagerWithError) List(_ context.Context) ([]DatabaseRole, error) { - m.callHistory = append(m.callHistory, funcCall{"list", ""}) - re := make([]DatabaseRole, len(m.roles)) - i := 0 - for _, r := range m.roles { - re[i] = r - i++ - } - return re, nil +type fakeInstanceData struct { + *postgres.Instance + db *sql.DB } -func (m *mockRoleManagerWithError) Update( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"update", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to update unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil +func (f *fakeInstanceData) GetSuperUserDB() (*sql.DB, error) { + return f.db, nil } -func (m *mockRoleManagerWithError) UpdateComment( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"updateComment", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to update comment of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManagerWithError) Create( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"create", role.Name}) - _, found := m.roles[role.Name] - if found { - return fmt.Errorf("tring to create existing role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManagerWithError) Delete( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"delete", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to delete unknown role: %s", role.Name) - } - return fmt.Errorf("could not delete role 'foo': %w", - &pgconn.PgError{ - Code: "2BP01", Detail: "owner of database edbDatabase", - Message: `role "dante" cannot be dropped because some objects depend on it`, +var _ = Describe("Role synchronizer tests", func() { + var ( + db *sql.DB + mock sqlmock.Sqlmock + err error + roleSynchronizer RoleSynchronizer + ) + + BeforeEach(func() { + db, mock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + DeferCleanup(func() { + Expect(mock.ExpectationsWereMet()).To(Succeed()) }) -} - -func (m *mockRoleManagerWithError) GetLastTransactionID(_ context.Context, _ DatabaseRole) (int64, error) { - return 0, nil -} -func (m *mockRoleManagerWithError) UpdateMembership( - _ context.Context, - role DatabaseRole, - _ []string, - _ []string, -) error { - m.callHistory = append(m.callHistory, funcCall{"updateMembership", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("trying to update Role Members of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return &pgconn.PgError{Code: "42704", Message: "unknown role 'blah'"} -} - -func (m *mockRoleManagerWithError) GetParentRoles(_ context.Context, role DatabaseRole) ([]string, error) { - m.callHistory = append(m.callHistory, funcCall{"getParentRoles", role.Name}) - _, found := m.roles[role.Name] - if !found { - return nil, fmt.Errorf("trying to get parent of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil, nil -} - -var _ = 
Describe("Role synchronizer tests", func() { - roleSynchronizer := RoleSynchronizer{ - instance: &postgres.Instance{ - Namespace: "myPod", - }, - } + testDate := time.Date(2023, 4, 4, 0, 0, 0, 0, time.UTC) + + rowsInMockDatabase := sqlmock.NewRows([]string{ + "rolname", "rolsuper", "rolinherit", "rolcreaterole", "rolcreatedb", + "rolcanlogin", "rolreplication", "rolconnlimit", "rolpassword", "rolvaliduntil", "rolbypassrls", "comment", + "xmin", "inroles", + }). + AddRow("postgres", true, false, true, true, true, false, -1, []byte("12345"), + nil, false, []byte("This is postgres user"), 11, []byte("{}")). + AddRow("streaming_replica", false, false, true, true, false, true, 10, []byte("54321"), + pgtype.Timestamp{ + Valid: true, + Time: testDate, + InfinityModifier: pgtype.Finite, + }, false, []byte("This is streaming_replica user"), 22, []byte(`{"role1","role2"}`)). + AddRow("role_to_ignore", true, false, true, true, true, false, -1, []byte("12345"), + nil, false, []byte("This is a custom role in the DB"), 11, []byte("{}")). + AddRow("role_to_test1", true, true, false, false, false, false, -1, []byte("12345"), + nil, false, []byte("This is a role to test with"), 11, []byte("{}")). + AddRow("role_to_test2", true, true, false, false, false, false, -1, []byte("12345"), + nil, false, []byte("This is a role to test with"), 11, []byte("{inrole}")) + mock.ExpectQuery(expectedSelStmt).WillReturnRows(rowsInMockDatabase) + + roleSynchronizer = RoleSynchronizer{ + instance: &fakeInstanceData{ + Instance: postgres.NewInstance().WithNamespace("default"), + db: db, + }, + } + }) When("role configurations are realizable", func() { It("it will Create ensure:present roles in spec missing from DB", func(ctx context.Context) { + mock.ExpectExec("CREATE ROLE \"foo_bar\" NOBYPASSRLS NOCREATEDB NOCREATEROLE INHERIT " + + "NOLOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 0"). 
+ WillReturnResult(sqlmock.NewResult(11, 1)) managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ - { - Name: "edb_test", - Ensure: apiv1.EnsurePresent, - }, { Name: "foo_bar", Ensure: apiv1.EnsurePresent, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + rows := mock.NewRows([]string{"xmin"}).AddRow("12") + lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1" + mock.ExpectQuery(lastTransactionQuery).WithArgs("foo_bar").WillReturnRows(rows) + passwordState, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, + map[string]apiv1.PasswordState{}) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - []funcCall{ - {"list", ""}, - {"create", "edb_test"}, - {"create", "foo_bar"}, + Expect(rolesWithErrors).To(BeEmpty()) + Expect(passwordState).To(BeEquivalentTo(map[string]apiv1.PasswordState{ + "foo_bar": { + TransactionID: 12, + SecretResourceVersion: "", }, - )) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"create", "edb_test"}, - funcCall{"create", "foo_bar"}, - )) + })) }) It("it will ignore ensure:absent roles in spec missing from DB", func(ctx context.Context) { @@ -281,324 +134,255 @@ var _ = Describe("Role synchronizer tests", func() { }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{}) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""})) }) - It("it will ignore DB roles that are not in spec", func(ctx context.Context) { + It("it will call the necessary grants to update membership", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Ensure: apiv1.EnsureAbsent, - }, - }, - } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "ignorezMoi": { - Name: "ignorezMoi", + Name: "role_to_test1", Superuser: true, + Inherit: ptr.To(true), + InRoles: []string{ + "role1", + "role2", + }, + Comment: "This is a role to test with", + ConnectionLimit: -1, }, }, } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + noParents := sqlmock.NewRows([]string{"inroles"}).AddRow([]byte(`{}`)) + mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test1").WillReturnRows(noParents) + mock.ExpectBegin() + expectedMembershipExecs := []string{ + `GRANT "role1" TO "role_to_test1"`, + `GRANT "role2" TO "role_to_test1"`, + } + + for _, ex := range expectedMembershipExecs { + mock.ExpectExec(ex). 
+ WillReturnResult(sqlmock.NewResult(2, 3)) + } + + mock.ExpectCommit() + + _, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above + }, + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""})) + Expect(rolesWithErrors).To(BeEmpty()) }) - It("it will call the updateMembership method", func(ctx context.Context) { - trueValue := true + It("it will call the necessary revokes to update membership", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Superuser: true, - Inherit: &trueValue, - InRoles: []string{ - "role1", - "role2", - }, + Name: "role_to_test2", + Superuser: true, + Inherit: ptr.To(true), + InRoles: []string{}, + Comment: "This is a role to test with", + ConnectionLimit: -1, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "edb_test": { - Name: "edb_test", - Superuser: true, - Inherit: true, - }, + rows := sqlmock.NewRows([]string{ + "inroles", + }). + AddRow([]byte(`{"foo"}`)) + mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test2").WillReturnRows(rows) + mock.ExpectBegin() + + mock.ExpectExec(`REVOKE "foo" FROM "role_to_test2"`). + WillReturnResult(sqlmock.NewResult(2, 3)) + + mock.ExpectCommit() + + _, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test2": { + TransactionID: 11, // defined in the mock query to the DB above }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""}, - funcCall{"getParentRoles", "edb_test"}, - funcCall{"updateMembership", "edb_test"})) + Expect(rolesWithErrors).To(BeEmpty()) }) It("it will call the updateComment method", func(ctx context.Context) { - trueValue := true managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Superuser: true, - Inherit: &trueValue, - Comment: "my comment", - }, - }, - } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "edb_test": { - Name: "edb_test", - Superuser: true, - Inherit: true, - Comment: "my tailor is rich", + Name: "role_to_test1", + Superuser: true, + Inherit: ptr.To(true), + Comment: "my comment", + ConnectionLimit: -1, }, }, } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + wantedRoleCommentStmt := fmt.Sprintf( + wantedRoleCommentTpl, + managedConf.Roles[0].Name, pq.QuoteLiteral(managedConf.Roles[0].Comment)) + mock.ExpectExec(wantedRoleCommentStmt).WillReturnResult(sqlmock.NewResult(2, 3)) + _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above + }, + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""}, - funcCall{"updateComment", "edb_test"})) }) It("it will no-op if the roles are reconciled", func(ctx context.Context) { - trueValue := true managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Superuser: true, - Inherit: &trueValue, + Name: "role_to_test1", + Superuser: true, + Inherit: ptr.To(true), + Comment: "This is a role to 
test with", + ConnectionLimit: -1, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "edb_test": { - Name: "edb_test", - Superuser: true, - Inherit: true, - }, + _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""})) }) It("it will Delete ensure:absent roles that are in the DB", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", + Name: "role_to_test1", Ensure: apiv1.EnsureAbsent, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_test": { - Name: "edb_test", - Superuser: true, - }, + roleDeletionStmt := fmt.Sprintf("DROP ROLE \"%s\"", "role_to_test1") + mock.ExpectExec(roleDeletionStmt).WillReturnResult(sqlmock.NewResult(2, 3)) + _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"delete", "edb_test"}, - )) }) It("it will Update ensure:present roles that are in the DB but have different fields", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Ensure: apiv1.EnsurePresent, - CreateDB: true, - BypassRLS: true, + Name: "role_to_test1", + Superuser: false, + Inherit: ptr.To(false), + Comment: "This is a role to test with", + BypassRLS: true, + CreateRole: true, + Login: true, + ConnectionLimit: 2, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, + alterStmt := fmt.Sprintf( + "ALTER ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 2 ", + "role_to_test1") + mock.ExpectExec(alterStmt).WillReturnResult(sqlmock.NewResult(2, 3)) + rows := mock.NewRows([]string{"xmin"}).AddRow("12") + lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1" + mock.ExpectQuery(lastTransactionQuery).WithArgs("role_to_test1").WillReturnRows(rows) + passwordState, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, + map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above }, - "edb_test": { - Name: "edb_test", - Superuser: true, - }, - }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"update", "edb_test"}, - )) + Expect(rolesWithErrors).To(BeEmpty()) + Expect(passwordState).To(BeEquivalentTo(map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 12, + SecretResourceVersion: "", + }, + })) }) }) When("role configurations are unrealizable", func() { - It("it will record that updateMembership could not succeed", func(ctx 
context.Context) { - trueValue := true + It("it will carry on and capture postgres errors per role", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", + Name: "role_to_test1", Superuser: true, - Inherit: &trueValue, + Inherit: ptr.To(true), InRoles: []string{ "role1", "role2", }, + Comment: "This is a role to test with", + ConnectionLimit: -1, }, - }, - } - rm := mockRoleManagerWithError{ - roles: map[string]DatabaseRole{ - "edb_test": { - Name: "edb_test", - Superuser: true, - Inherit: true, - }, - }, - } - _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) - Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""}, - funcCall{"getParentRoles", "edb_test"}, - funcCall{"updateMembership", "edb_test"})) - Expect(unrealizable).To(HaveLen(1)) - Expect(unrealizable["edb_test"]).To(HaveLen(1)) - Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo( - "could not perform UPDATE_MEMBERSHIPS on role edb_test: unknown role 'blah'")) - }) - - It("it will record that Delete could not succeed", func(ctx context.Context) { - managedConf := apiv1.ManagedConfiguration{ - Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", + Name: "role_to_test2", Ensure: apiv1.EnsureAbsent, }, }, } - rm := mockRoleManagerWithError{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_test": { - Name: "edb_test", - Superuser: true, - }, - }, + + noParents := sqlmock.NewRows([]string{"inroles"}).AddRow([]byte(`{}`)) + mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test1").WillReturnRows(noParents) + mock.ExpectBegin() + + mock.ExpectExec(`GRANT "role1" TO "role_to_test1"`). + WillReturnResult(sqlmock.NewResult(2, 3)) + + impossibleGrantError := pgconn.PgError{ + Code: "0LP01", // 0LP01 -> invalid_grant_operation + Message: "unknown role 'role2'", } - _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) - Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"delete", "edb_test"}, - )) - Expect(unrealizable).To(HaveLen(1)) - Expect(unrealizable["edb_test"]).To(HaveLen(1)) - Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo( - "could not perform DELETE on role edb_test: owner of database edbDatabase")) - }) + mock.ExpectExec(`GRANT "role2" TO "role_to_test1"`). 
+ WillReturnError(&impossibleGrantError) - It("it will continue the synchronization even if it finds errors", func(ctx context.Context) { - trueValue := true - managedConf := apiv1.ManagedConfiguration{ - Roles: []apiv1.RoleConfiguration{ - { - Name: "edb_test", - Ensure: apiv1.EnsureAbsent, - }, - { - Name: "another_test", - Ensure: apiv1.EnsurePresent, - Superuser: true, - Inherit: &trueValue, - InRoles: []string{ - "role1", - "role2", - }, - }, - }, + mock.ExpectRollback() + + impossibleDeleteError := pgconn.PgError{ + Code: "2BP01", // 2BP01 -> dependent_objects_still_exist + Detail: "owner of database edbDatabase", } - rm := mockRoleManagerWithError{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_test": { - Name: "edb_test", - Superuser: true, - }, - "another_test": { - Name: "another_test", - Superuser: true, - Inherit: true, - }, + + roleDeletionStmt := fmt.Sprintf("DROP ROLE \"%s\"", "role_to_test2") + mock.ExpectExec(roleDeletionStmt).WillReturnError(&impossibleDeleteError) + + _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above }, - } - _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) + Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"delete", "edb_test"}, - funcCall{"getParentRoles", "another_test"}, - funcCall{"updateMembership", "another_test"}, - )) Expect(unrealizable).To(HaveLen(2)) - Expect(unrealizable["edb_test"]).To(HaveLen(1)) - Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo( - "could not perform DELETE on role edb_test: owner of database edbDatabase")) - Expect(unrealizable["another_test"]).To(HaveLen(1)) - Expect(unrealizable["another_test"][0]).To(BeEquivalentTo( - "could not perform UPDATE_MEMBERSHIPS on role another_test: unknown role 'blah'")) + Expect(unrealizable["role_to_test1"]).To(HaveLen(1)) + Expect(unrealizable["role_to_test1"][0]).To(BeEquivalentTo( + "could not perform UPDATE_MEMBERSHIPS on role role_to_test1: unknown role 'role2'")) + Expect(unrealizable["role_to_test2"]).To(HaveLen(1)) + Expect(unrealizable["role_to_test2"][0]).To(BeEquivalentTo( + "could not perform DELETE on role role_to_test2: owner of database edbDatabase")) }) }) }) -var _ = DescribeTable("Role status getter tests", - func(spec *apiv1.ManagedConfiguration, db mockRoleManager, expected map[string]apiv1.RoleStatus) { +var _ = DescribeTable("Role status tests", + func(spec *apiv1.ManagedConfiguration, roles []DatabaseRole, expected map[string]apiv1.RoleStatus) { ctx := context.TODO() - roles, err := db.List(ctx) - Expect(err).ToNot(HaveOccurred()) - statusMap := evaluateNextRoleActions(ctx, spec, roles, map[string]apiv1.PasswordState{ "roleWithChangedPassInSpec": { TransactionID: 101, @@ -639,17 +423,15 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "ensurePresent": { - Name: "ensurePresent", - Superuser: true, - Inherit: true, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "ensurePresent", + Superuser: true, + Inherit: true, }, }, map[string]apiv1.RoleStatus{ @@ -678,20 +460,18 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: 
map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "unwantedInDB": { - Name: "unwantedInDB", - Superuser: true, - }, - "drifted": { - Name: "drifted", - Superuser: false, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "unwantedInDB", + Superuser: true, + }, + { + Name: "drifted", + Superuser: false, }, }, map[string]apiv1.RoleStatus{ @@ -711,21 +491,19 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_admin": { - Name: "edb_admin", - Superuser: true, - Inherit: true, - }, - "missingFromSpec": { - Name: "missingFromSpec", - Superuser: false, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "edb_admin", + Superuser: true, + Inherit: true, + }, + { + Name: "missingFromSpec", + Superuser: false, }, }, map[string]apiv1.RoleStatus{ @@ -745,18 +523,16 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "roleWithChangedPassInDB": { - Name: "roleWithChangedPassInDB", - Superuser: true, - transactionID: 102, - Inherit: true, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "roleWithChangedPassInDB", + Superuser: true, + transactionID: 102, + Inherit: true, }, }, map[string]apiv1.RoleStatus{ @@ -774,18 +550,16 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "roleWithChangedPassInSpec": { - Name: "roleWithChangedPassInSpec", - Superuser: true, - transactionID: 101, - Inherit: true, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "roleWithChangedPassInSpec", + Superuser: true, + transactionID: 101, + Inherit: true, }, }, map[string]apiv1.RoleStatus{ diff --git a/internal/management/controller/roles/suite_test.go b/internal/management/controller/roles/suite_test.go index a6dd16bc1d..c7fe63ee13 100644 --- a/internal/management/controller/roles/suite_test.go +++ b/internal/management/controller/roles/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package roles @@ -23,6 +26,29 @@ import ( . 
"github.com/onsi/gomega" ) +const ( + expectedSelStmt = `SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls, + pg_catalog.shobj_description(auth.oid, 'pg_authid') as comment, auth.xmin, + mem.inroles + FROM pg_catalog.pg_authid as auth + LEFT JOIN ( + SELECT pg_catalog.array_agg(pg_catalog.pg_get_userbyid(roleid)) as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member + ) mem ON member = oid + WHERE rolname not like 'pg\_%'` + + expectedMembershipStmt = `SELECT mem.inroles + FROM pg_catalog.pg_authid as auth + LEFT JOIN ( + SELECT pg_catalog.array_agg(pg_catalog.pg_get_userbyid(roleid)) as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member + ) mem ON member = oid + WHERE rolname = $1` + + wantedRoleCommentTpl = "COMMENT ON ROLE \"%s\" IS %s" +) + func TestReconciler(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Internal Management Controller Roles Reconciler Suite") diff --git a/internal/management/controller/slots/infrastructure/contract.go b/internal/management/controller/slots/infrastructure/contract.go deleted file mode 100644 index d0e3d0d992..0000000000 --- a/internal/management/controller/slots/infrastructure/contract.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package infrastructure - -import ( - "context" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// Manager abstracts the operations that need to be sent to -// the database instance for the management of Replication Slots -type Manager interface { - // List the available replication slots - List(ctx context.Context, config *apiv1.ReplicationSlotsConfiguration) (ReplicationSlotList, error) - // Update the replication slot - Update(ctx context.Context, slot ReplicationSlot) error - // Create the replication slot - Create(ctx context.Context, slot ReplicationSlot) error - // Delete the replication slot - Delete(ctx context.Context, slot ReplicationSlot) error -} diff --git a/internal/management/controller/slots/infrastructure/doc.go b/internal/management/controller/slots/infrastructure/doc.go index d96244d8e9..f6d3af89c4 100644 --- a/internal/management/controller/slots/infrastructure/doc.go +++ b/internal/management/controller/slots/infrastructure/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package infrastructure contains the structs and interfaces needed to manage replication slots diff --git a/internal/management/controller/slots/infrastructure/postgresmanager.go b/internal/management/controller/slots/infrastructure/postgresmanager.go index 74360cf783..229ce2e610 100644 --- a/internal/management/controller/slots/infrastructure/postgresmanager.go +++ b/internal/management/controller/slots/infrastructure/postgresmanager.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,51 +13,29 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package infrastructure import ( "context" + "database/sql" "strings" "github.com/cloudnative-pg/machinery/pkg/log" v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool" ) -// PostgresManager is a Manager for a database instance -type PostgresManager struct { - pool pool.Pooler -} - -// NewPostgresManager returns an implementation of Manager for postgres -func NewPostgresManager(pool pool.Pooler) Manager { - return PostgresManager{ - pool: pool, - } -} - -func (sm PostgresManager) String() string { - return sm.pool.GetDsn("postgres") -} - // List the available replication slots -func (sm PostgresManager) List( - ctx context.Context, - config *v1.ReplicationSlotsConfiguration, -) (ReplicationSlotList, error) { - db, err := sm.pool.Connection("postgres") - if err != nil { - return ReplicationSlotList{}, err - } - +func List(ctx context.Context, db *sql.DB, config *v1.ReplicationSlotsConfiguration) (ReplicationSlotList, error) { rows, err := db.QueryContext( ctx, `SELECT slot_name, slot_type, active, coalesce(restart_lsn::TEXT, '') AS restart_lsn, xmin IS NOT NULL OR catalog_xmin IS NOT NULL AS holds_xmin - FROM pg_replication_slots + FROM pg_catalog.pg_replication_slots WHERE NOT temporary AND slot_type = 'physical'`, ) if err != nil { @@ -100,49 +79,35 @@ func (sm PostgresManager) List( } // Update the replication slot -func (sm PostgresManager) Update(ctx context.Context, slot ReplicationSlot) error { +func Update(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { contextLog := log.FromContext(ctx).WithName("updateSlot") contextLog.Trace("Invoked", "slot", slot) if slot.RestartLSN == "" { return nil } - db, err := sm.pool.Connection("postgres") - if err != nil { - return err - } - _, err = db.ExecContext(ctx, "SELECT pg_replication_slot_advance($1, $2)", slot.SlotName, slot.RestartLSN) + _, err := db.ExecContext(ctx, "SELECT pg_catalog.pg_replication_slot_advance($1, $2)", slot.SlotName, slot.RestartLSN) return err } // Create the replication slot -func (sm PostgresManager) Create(ctx context.Context, slot ReplicationSlot) error { +func Create(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { contextLog := log.FromContext(ctx).WithName("createSlot") contextLog.Trace("Invoked", "slot", slot) - db, err := sm.pool.Connection("postgres") - if err != nil { - return err - } - - _, err = db.ExecContext(ctx, "SELECT 
pg_create_physical_replication_slot($1, $2)", + _, err := db.ExecContext(ctx, "SELECT pg_catalog.pg_create_physical_replication_slot($1, $2)", slot.SlotName, slot.RestartLSN != "") return err } // Delete the replication slot -func (sm PostgresManager) Delete(ctx context.Context, slot ReplicationSlot) error { +func Delete(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { contextLog := log.FromContext(ctx).WithName("dropSlot") contextLog.Trace("Invoked", "slot", slot) if slot.Active { return nil } - db, err := sm.pool.Connection("postgres") - if err != nil { - return err - } - - _, err = db.ExecContext(ctx, "SELECT pg_drop_replication_slot($1)", slot.SlotName) + _, err := db.ExecContext(ctx, "SELECT pg_catalog.pg_drop_replication_slot($1)", slot.SlotName) return err } diff --git a/internal/management/controller/slots/infrastructure/postgresmanager_test.go b/internal/management/controller/slots/infrastructure/postgresmanager_test.go index 251832847c..748e8c09a7 100644 --- a/internal/management/controller/slots/infrastructure/postgresmanager_test.go +++ b/internal/management/controller/slots/infrastructure/postgresmanager_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package infrastructure import ( - "context" "database/sql" "errors" @@ -31,17 +33,15 @@ import ( var _ = Describe("PostgresManager", func() { var ( - manager Manager - mock sqlmock.Sqlmock - db *sql.DB - slot ReplicationSlot + mock sqlmock.Sqlmock + db *sql.DB + slot ReplicationSlot ) BeforeEach(func() { var err error db, mock, err = sqlmock.New() Expect(err).NotTo(HaveOccurred()) - manager = NewPostgresManager(&mockPooler{db: db}) slot = ReplicationSlot{ SlotName: "slot1", Type: SlotTypePhysical, @@ -55,26 +55,29 @@ var _ = Describe("PostgresManager", func() { }) Context("Create", func() { - It("should successfully create a replication slot", func() { - mock.ExpectExec("SELECT pg_create_physical_replication_slot"). + const expectedSQL = "SELECT pg_catalog.pg_create_physical_replication_slot" + It("should successfully create a replication slot", func(ctx SpecContext) { + mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN != ""). WillReturnResult(sqlmock.NewResult(1, 1)) - err := manager.Create(context.Background(), slot) + err := Create(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) - It("should return error when the database execution fails", func() { - mock.ExpectExec("SELECT pg_create_physical_replication_slot"). + It("should return error when the database execution fails", func(ctx SpecContext) { + mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN != ""). 
WillReturnError(errors.New("mock error")) - err := manager.Create(context.Background(), slot) + err := Create(ctx, db, slot) Expect(err).To(HaveOccurred()) }) }) Context("List", func() { + const expectedSQL = "^SELECT (.+) FROM pg_catalog.pg_replication_slots" + var config *v1.ReplicationSlotsConfiguration BeforeEach(func() { config = &v1.ReplicationSlotsConfiguration{ @@ -86,15 +89,15 @@ var _ = Describe("PostgresManager", func() { } }) - It("should successfully list replication slots", func() { + It("should successfully list replication slots", func(ctx SpecContext) { rows := sqlmock.NewRows([]string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"}). AddRow("_cnpg_slot1", string(SlotTypePhysical), true, "lsn1", false). AddRow("slot2", string(SlotTypePhysical), true, "lsn2", false) - mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + mock.ExpectQuery(expectedSQL). WillReturnRows(rows) - result, err := manager.List(context.Background(), config) + result, err := List(ctx, db, config) Expect(err).NotTo(HaveOccurred()) Expect(result.Items).To(HaveLen(2)) Expect(result.Has("_cnpg_slot1")).To(BeTrue()) @@ -113,65 +116,69 @@ var _ = Describe("PostgresManager", func() { Expect(slot2.IsHA).To(BeFalse()) }) - It("should return error when database query fails", func() { - mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + It("should return error when database query fails", func(ctx SpecContext) { + mock.ExpectQuery(expectedSQL). WillReturnError(errors.New("mock error")) - _, err := manager.List(context.Background(), config) + _, err := List(ctx, db, config) Expect(err).To(HaveOccurred()) }) }) Context("Update", func() { - It("should successfully update a replication slot", func() { - mock.ExpectExec("SELECT pg_replication_slot_advance"). + const expectedSQL = "SELECT pg_catalog.pg_replication_slot_advance" + + It("should successfully update a replication slot", func(ctx SpecContext) { + mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN). WillReturnResult(sqlmock.NewResult(1, 1)) - err := manager.Update(context.Background(), slot) + err := Update(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) - It("should return error when the database execution fails", func() { - mock.ExpectExec("SELECT pg_replication_slot_advance"). + It("should return error when the database execution fails", func(ctx SpecContext) { + mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN). WillReturnError(errors.New("mock error")) - err := manager.Update(context.Background(), slot) + err := Update(ctx, db, slot) Expect(err).To(HaveOccurred()) }) - It("should not update a replication slot when RestartLSN is empty", func() { + It("should not update a replication slot when RestartLSN is empty", func(ctx SpecContext) { slot.RestartLSN = "" - err := manager.Update(context.Background(), slot) + err := Update(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) }) Context("Delete", func() { - It("should successfully delete a replication slot", func() { + const expectedSQL = "SELECT pg_catalog.pg_drop_replication_slot" + + It("should successfully delete a replication slot", func(ctx SpecContext) { slot.Active = false - mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot.SlotName). + mock.ExpectExec(expectedSQL).WithArgs(slot.SlotName). 
WillReturnResult(sqlmock.NewResult(1, 1)) - err := manager.Delete(context.Background(), slot) + err := Delete(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) - It("should return error when the database execution fails", func() { + It("should return error when the database execution fails", func(ctx SpecContext) { slot.Active = false - mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot.SlotName). + mock.ExpectExec(expectedSQL).WithArgs(slot.SlotName). WillReturnError(errors.New("mock error")) - err := manager.Delete(context.Background(), slot) + err := Delete(ctx, db, slot) Expect(err).To(HaveOccurred()) }) - It("should not delete an active replication slot", func() { + It("should not delete an active replication slot", func(ctx SpecContext) { slot.RestartLSN = "" - err := manager.Delete(context.Background(), slot) + err := Delete(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/internal/management/controller/slots/infrastructure/replicationslot.go b/internal/management/controller/slots/infrastructure/replicationslot.go index 9cda0b2971..a5d2e5de03 100644 --- a/internal/management/controller/slots/infrastructure/replicationslot.go +++ b/internal/management/controller/slots/infrastructure/replicationslot.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package infrastructure diff --git a/internal/management/controller/slots/infrastructure/replicationslot_test.go b/internal/management/controller/slots/infrastructure/replicationslot_test.go index cd350b1828..80e94b471a 100644 --- a/internal/management/controller/slots/infrastructure/replicationslot_test.go +++ b/internal/management/controller/slots/infrastructure/replicationslot_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package infrastructure diff --git a/internal/management/controller/slots/infrastructure/suite_test.go b/internal/management/controller/slots/infrastructure/suite_test.go index ec8b6e54af..5861937c63 100644 --- a/internal/management/controller/slots/infrastructure/suite_test.go +++ b/internal/management/controller/slots/infrastructure/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
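With `List`, `Create`, `Update`, and `Delete` turned into plain functions over a `*sql.DB`, the `mockPooler` scaffolding removed in the next hunk becomes dead weight: go-sqlmock hands the tests a ready-made handle directly. A minimal, self-contained sketch of the mechanics (the `count(*)` query is illustrative only, not from the patch); note that go-sqlmock compiles expectation strings as regular expressions by default, which is why the tests above match on fragments such as `"SELECT pg_catalog.pg_drop_replication_slot"`:

```go
package main

import (
	"context"
	"fmt"
	"regexp"

	"github.com/DATA-DOG/go-sqlmock"
)

func main() {
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// sqlmock treats the expectation as a regexp, so literal SQL that
	// contains metacharacters such as (*) should go through QuoteMeta.
	query := "SELECT count(*) FROM pg_catalog.pg_replication_slots WHERE NOT temporary"
	mock.ExpectQuery(regexp.QuoteMeta(query)).
		WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(2))

	var n int
	if err := db.QueryRowContext(context.Background(), query).Scan(&n); err != nil {
		panic(err)
	}
	fmt.Println(n, mock.ExpectationsWereMet()) // 2 <nil>
}
```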
@@ -12,13 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package infrastructure import ( - "database/sql" - "errors" "testing" . "github.com/onsi/ginkgo/v2" @@ -29,23 +30,3 @@ func TestReconciler(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Internal Management Controller Slots Infrastructure Suite") } - -// mockPooler is a mock implementation of the Pooler interface -type mockPooler struct { - db *sql.DB -} - -func (mp *mockPooler) Connection(_ string) (*sql.DB, error) { - if mp.db == nil { - return nil, errors.New("connection error") - } - return mp.db, nil -} - -func (mp *mockPooler) GetDsn(_ string) string { - return "mocked DSN" -} - -func (mp *mockPooler) ShutdownConnections() { - // no-op in mock -} diff --git a/internal/management/controller/slots/reconciler/doc.go b/internal/management/controller/slots/reconciler/doc.go index 8092314cd5..7a06a8fc94 100644 --- a/internal/management/controller/slots/reconciler/doc.go +++ b/internal/management/controller/slots/reconciler/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package reconciler contains all the logic needed to reconcile replication slots diff --git a/internal/management/controller/slots/reconciler/replicationslot.go b/internal/management/controller/slots/reconciler/replicationslot.go index 7871358414..a1a4a2c54c 100644 --- a/internal/management/controller/slots/reconciler/replicationslot.go +++ b/internal/management/controller/slots/reconciler/replicationslot.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package reconciler import ( "context" + "database/sql" "fmt" "time" @@ -32,7 +36,7 @@ import ( func ReconcileReplicationSlots( ctx context.Context, instanceName string, - manager infrastructure.Manager, + db *sql.DB, cluster *apiv1.Cluster, ) (reconcile.Result, error) { if cluster.Spec.ReplicationSlots == nil || @@ -48,11 +52,11 @@ func ReconcileReplicationSlots( // we also clean up the slots that fall under the user defined replication slots feature here. 
// TODO: split-out user defined replication slots code if !cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() { - return dropReplicationSlots(ctx, manager, cluster, isPrimary) + return dropReplicationSlots(ctx, db, cluster, isPrimary) } if isPrimary { - return reconcilePrimaryHAReplicationSlots(ctx, manager, cluster) + return reconcilePrimaryHAReplicationSlots(ctx, db, cluster) } return reconcile.Result{}, nil @@ -61,13 +65,13 @@ func ReconcileReplicationSlots( // reconcilePrimaryHAReplicationSlots reconciles the HA replication slots of the primary instance func reconcilePrimaryHAReplicationSlots( ctx context.Context, - manager infrastructure.Manager, + db *sql.DB, cluster *apiv1.Cluster, ) (reconcile.Result, error) { contextLogger := log.FromContext(ctx) contextLogger.Debug("Updating primary HA replication slots") - currentSlots, err := manager.List(ctx, cluster.Spec.ReplicationSlots) + currentSlots, err := infrastructure.List(ctx, db, cluster.Spec.ReplicationSlots) if err != nil { return reconcile.Result{}, fmt.Errorf("reconciling primary replication slots: %w", err) } @@ -88,7 +92,7 @@ func reconcilePrimaryHAReplicationSlots( } // At this point, the cluster instance does not have a HA replication slot - if err := manager.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotName}); err != nil { + if err := infrastructure.Create(ctx, db, infrastructure.ReplicationSlot{SlotName: slotName}); err != nil { return reconcile.Result{}, fmt.Errorf("creating primary HA replication slots: %w", err) } } @@ -115,7 +119,7 @@ func reconcilePrimaryHAReplicationSlots( } contextLogger.Trace("Attempt to delete replication slot", "slot", slot) - if err := manager.Delete(ctx, slot); err != nil { + if err := infrastructure.Delete(ctx, db, slot); err != nil { return reconcile.Result{}, fmt.Errorf("failure deleting replication slot %q: %w", slot.SlotName, err) } } @@ -133,7 +137,7 @@ func reconcilePrimaryHAReplicationSlots( // we also clean up the slots that fall under the user defined replication slots feature here. func dropReplicationSlots( ctx context.Context, - manager infrastructure.Manager, + db *sql.DB, cluster *apiv1.Cluster, isPrimary bool, ) (reconcile.Result, error) { @@ -144,7 +148,7 @@ func dropReplicationSlots( dropUserSlots := !cluster.Spec.ReplicationSlots.SynchronizeReplicas.GetEnabled() // we fetch all replication slots - slots, err := manager.List(ctx, cluster.Spec.ReplicationSlots) + slots, err := infrastructure.List(ctx, db, cluster.Spec.ReplicationSlots) if err != nil { return reconcile.Result{}, err } @@ -169,7 +173,7 @@ func dropReplicationSlots( } contextLogger.Trace("Attempt to delete replication slot", "slot", slot) - if err := manager.Delete(ctx, slot); err != nil { + if err := infrastructure.Delete(ctx, db, slot); err != nil { return reconcile.Result{}, fmt.Errorf("while disabling standby HA replication slots: %w", err) } } diff --git a/internal/management/controller/slots/reconciler/replicationslot_test.go b/internal/management/controller/slots/reconciler/replicationslot_test.go index 0634641475..961a639389 100644 --- a/internal/management/controller/slots/reconciler/replicationslot_test.go +++ b/internal/management/controller/slots/reconciler/replicationslot_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
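The hunk below rewrites these reconciler tests against sqlmock, mirroring how the reconciler itself now threads a single `*sql.DB` through `infrastructure.List`, `Create`, and `Delete`. As a rough composition sketch of the new style (the `ensureSlot` helper is hypothetical; the package paths are the ones shown in this patch):

```go
package example

import (
	"context"
	"database/sql"
	"fmt"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/infrastructure"
)

// ensureSlot is a hypothetical helper showing how the package-level
// functions that replaced the Manager interface compose over one handle.
func ensureSlot(ctx context.Context, db *sql.DB, name string,
	cfg *apiv1.ReplicationSlotsConfiguration,
) error {
	slots, err := infrastructure.List(ctx, db, cfg)
	if err != nil {
		return fmt.Errorf("listing replication slots: %w", err)
	}
	if slots.Has(name) {
		return nil // nothing to do, the slot already exists
	}
	return infrastructure.Create(ctx, db, infrastructure.ReplicationSlot{SlotName: name})
}
```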
@@ -12,16 +13,19 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package reconciler import ( - "context" + "database/sql" + "database/sql/driver" "errors" - "strings" "time" + "github.com/DATA-DOG/go-sqlmock" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -31,58 +35,9 @@ import ( . "github.com/onsi/gomega" ) -type fakeSlot struct { - name string - active bool - isHA bool -} - -type fakeReplicationSlotManager struct { - replicationSlots map[fakeSlot]bool - triggerListError bool - triggerDeleteError bool -} - const slotPrefix = "_cnpg_" -func (fk fakeReplicationSlotManager) Create(_ context.Context, slot infrastructure.ReplicationSlot) error { - isHA := strings.HasPrefix(slot.SlotName, slotPrefix) - fk.replicationSlots[fakeSlot{name: slot.SlotName, isHA: isHA}] = true - return nil -} - -func (fk fakeReplicationSlotManager) Delete(_ context.Context, slot infrastructure.ReplicationSlot) error { - if fk.triggerDeleteError { - return errors.New("triggered delete error") - } - delete(fk.replicationSlots, fakeSlot{name: slot.SlotName, active: slot.Active, isHA: slot.IsHA}) - return nil -} - -func (fk fakeReplicationSlotManager) Update(_ context.Context, _ infrastructure.ReplicationSlot) error { - return nil -} - -func (fk fakeReplicationSlotManager) List( - _ context.Context, - _ *apiv1.ReplicationSlotsConfiguration, -) (infrastructure.ReplicationSlotList, error) { - var slotList infrastructure.ReplicationSlotList - if fk.triggerListError { - return slotList, errors.New("triggered list error") - } - - for slot := range fk.replicationSlots { - slotList.Items = append(slotList.Items, infrastructure.ReplicationSlot{ - SlotName: slot.name, - RestartLSN: "", - Type: infrastructure.SlotTypePhysical, - Active: slot.active, - IsHA: slot.isHA, - }) - } - return slotList, nil -} +var repSlotColumns = []string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"} func makeClusterWithInstanceNames(instanceNames []string, primary string) apiv1.Cluster { return apiv1.Cluster{ @@ -102,134 +57,156 @@ func makeClusterWithInstanceNames(instanceNames []string, primary string) apiv1. } } +func newRepSlot(name string, active bool, restartLSN string) []driver.Value { + return []driver.Value{ + slotPrefix + name, string(infrastructure.SlotTypePhysical), active, restartLSN, false, + } +} + var _ = Describe("HA Replication Slots reconciliation in Primary", func() { - It("can create a new replication slot for a new cluster instance", func() { - fakeSlotManager := fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: slotPrefix + "instance1", isHA: true}: true, - {name: slotPrefix + "instance2", isHA: true}: true, - }, - } + var ( + db *sql.DB + mock sqlmock.Sqlmock + ) + BeforeEach(func() { + var err error + db, mock, err = sqlmock.New() + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func() { + Expect(mock.ExpectationsWereMet()).To(Succeed()) + }) + It("can create a new replication slot for a new cluster instance", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", true, "lsn1")...). + AddRow(newRepSlot("instance2", true, "lsn2")...) 
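The `newRepSlot` helper introduced above returns `[]driver.Value` precisely so that a whole `pg_replication_slots` row can be spread into sqlmock's variadic `AddRow` with a single `...` expansion, as the rows built below do. A small stand-alone illustration of the same trick (the column values are placeholders):

```go
package example

import (
	"database/sql/driver"

	"github.com/DATA-DOG/go-sqlmock"
)

var repSlotColumns = []string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"}

// row mimics newRepSlot: building a []driver.Value lets the caller expand
// it into sqlmock's variadic AddRow with the ... operator.
func row(name string, active bool, lsn string) []driver.Value {
	return []driver.Value{name, "physical", active, lsn, false}
}

func slotRows() *sqlmock.Rows {
	return sqlmock.NewRows(repSlotColumns).
		AddRow(row("_cnpg_instance1", true, "0/301C4D8")...).
		AddRow(row("_cnpg_instance2", false, "0/302C4D8")...)
}
```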
- cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2", "instance3"}, "instance1") + mock.ExpectQuery("^SELECT (.+) FROM pg_catalog.pg_replication_slots"). + WillReturnRows(rows) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(2)) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeTrue()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance2", isHA: true}]).To(BeTrue()) + mock.ExpectExec("SELECT pg_catalog.pg_create_physical_replication_slot"). + WithArgs(slotPrefix+"instance3", false). + WillReturnResult(sqlmock.NewResult(1, 1)) - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2", "instance3"}, "instance1") + + _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster) Expect(err).ShouldNot(HaveOccurred()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeFalse()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeTrue()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance2", isHA: true}]).To(BeTrue()) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(2)) }) - It("can delete an inactive HA replication slot that is not in the cluster", func() { - fakeSlotManager := fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: slotPrefix + "instance1", isHA: true}: true, - {name: slotPrefix + "instance2", isHA: true}: true, - {name: slotPrefix + "instance3", isHA: true}: true, - }, - } + It("can delete an inactive HA replication slot that is not in the cluster", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", true, "lsn1")...). + AddRow(newRepSlot("instance2", true, "lsn2")...). + AddRow(newRepSlot("instance3", false, "lsn2")...) - cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") + mock.ExpectQuery("^SELECT (.+) FROM pg_catalog.pg_replication_slots"). + WillReturnRows(rows) + + mock.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slotPrefix + "instance3"). + WillReturnResult(sqlmock.NewResult(1, 1)) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(3)) + cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster) Expect(err).ShouldNot(HaveOccurred()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeFalse()) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(1)) }) - It("will not delete an active HA replication slot that is not in the cluster", func() { - fakeSlotManager := fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: slotPrefix + "instance1", isHA: true}: true, - {name: slotPrefix + "instance2", isHA: true}: true, - {name: slotPrefix + "instance3", isHA: true, active: true}: true, - }, - } + It("will not delete an active HA replication slot that is not in the cluster", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", true, "lsn1")...). + AddRow(newRepSlot("instance2", true, "lsn2")...). + AddRow(newRepSlot("instance3", true, "lsn2")...) 
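Note that the active-slot test continuing below never registers a `pg_drop_replication_slot` expectation: the `AfterEach` call to `ExpectationsWereMet` is what turns that absence into an assertion, replacing the old `callHistory` bookkeeping. A tiny demonstration of that failure mode, outside the suite and purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/DATA-DOG/go-sqlmock"
)

func main() {
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Register an expectation the code under test never fulfils...
	mock.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").
		WillReturnResult(sqlmock.NewResult(1, 1))

	// ...and the unmet expectation surfaces here. Conversely, a DROP that
	// was *not* expected would fail at execution time, so "no expectation
	// registered" doubles as "this statement must never run".
	fmt.Println(mock.ExpectationsWereMet()) // non-nil: expectation not met
}
```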
- cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") + mock.ExpectQuery("^SELECT (.+) FROM pg_catalog.pg_replication_slots"). + WillReturnRows(rows) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(3)) + cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster) Expect(err).ShouldNot(HaveOccurred()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: slotPrefix + "instance3", isHA: true, active: true}]). - To(BeTrue()) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(2)) }) }) var _ = Describe("dropReplicationSlots", func() { - It("returns error when listing slots fails", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: make(map[fakeSlot]bool), - triggerListError: true, - } + const selectPgRepSlot = "^SELECT (.+) FROM pg_catalog.pg_replication_slots" + + var ( + db *sql.DB + mock sqlmock.Sqlmock + ) + BeforeEach(func() { + var err error + db, mock, err = sqlmock.New() + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func() { + Expect(mock.ExpectationsWereMet()).To(Succeed()) + }) + + It("returns error when listing slots fails", func(ctx SpecContext) { cluster := makeClusterWithInstanceNames([]string{}, "") - _, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + mock.ExpectQuery(selectPgRepSlot).WillReturnError(errors.New("triggered list error")) + + _, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("triggered list error")) }) - It("skips deletion of active HA slots and reschedules", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: "slot1", active: true, isHA: true}: true, - }, - } + It("skips deletion of active HA slots and reschedules", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", true, "lsn1")...) + mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows) + cluster := makeClusterWithInstanceNames([]string{}, "") - res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + res, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).NotTo(HaveOccurred()) Expect(res.RequeueAfter).To(Equal(time.Second)) }) - It("skips the deletion of user defined replication slots on the primary", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: "slot1", active: true}: true, - }, - } + It("skips the deletion of user defined replication slots on the primary", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow("custom-slot", string(infrastructure.SlotTypePhysical), true, "lsn1", false) + mock.ExpectQuery("^SELECT (.+) FROM pg_catalog.pg_replication_slots"). 
+ WillReturnRows(rows) + cluster := makeClusterWithInstanceNames([]string{}, "") - res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + res, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).NotTo(HaveOccurred()) Expect(res.RequeueAfter).To(Equal(time.Duration(0))) Expect(res.IsZero()).To(BeTrue()) }) - It("returns error when deleting a slot fails", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: "slot1", active: false, isHA: true}: true, - }, - triggerDeleteError: true, - } + It("returns error when deleting a slot fails", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", false, "lsn1")...) + mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows) + + mock.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slotPrefix + "instance1"). + WillReturnError(errors.New("delete error")) + cluster := makeClusterWithInstanceNames([]string{}, "") - _, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + _, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("triggered delete error")) + Expect(err.Error()).To(ContainSubstring("delete error")) }) - It("deletes inactive slots and does not reschedule", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: "slot1", active: false, isHA: true}: true, - }, - } + It("deletes inactive slots and does not reschedule", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", false, "lsn1")...) + mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows) + + mock.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slotPrefix + "instance1"). + WillReturnResult(sqlmock.NewResult(1, 1)) + cluster := makeClusterWithInstanceNames([]string{}, "") - res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + res, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).NotTo(HaveOccurred()) Expect(res.RequeueAfter).To(Equal(time.Duration(0))) - Expect(fakeManager.replicationSlots).NotTo(HaveKey(fakeSlot{name: "slot1", active: false})) }) }) diff --git a/internal/management/controller/slots/reconciler/suite_test.go b/internal/management/controller/slots/reconciler/suite_test.go index e8a1f7999e..ea24d0a66d 100644 --- a/internal/management/controller/slots/reconciler/suite_test.go +++ b/internal/management/controller/slots/reconciler/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package reconciler diff --git a/internal/management/controller/slots/runner/doc.go b/internal/management/controller/slots/runner/doc.go index 0a71445997..93f791917a 100644 --- a/internal/management/controller/slots/runner/doc.go +++ b/internal/management/controller/slots/runner/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package runner contains the runner that replicates slots from the primary to the replicas diff --git a/internal/management/controller/slots/runner/runner.go b/internal/management/controller/slots/runner/runner.go index ac49f9b33e..e3bd9e12d0 100644 --- a/internal/management/controller/slots/runner/runner.go +++ b/internal/management/controller/slots/runner/runner.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package runner import ( "context" + "database/sql" "fmt" "time" @@ -45,7 +49,13 @@ func NewReplicator(instance *postgres.Instance) *Replicator { func (sr *Replicator) Start(ctx context.Context) error { contextLog := log.FromContext(ctx).WithName("Replicator") go func() { - config := <-sr.instance.SlotReplicatorChan() + var config *apiv1.ReplicationSlotsConfiguration + select { + case config = <-sr.instance.SlotReplicatorChan(): + case <-ctx.Done(): + return + } + updateInterval := config.GetUpdateInterval() ticker := time.NewTicker(updateInterval) @@ -108,11 +118,24 @@ func (sr *Replicator) reconcile(ctx context.Context, config *apiv1.ReplicationSl primaryPool := sr.instance.PrimaryConnectionPool() localPool := sr.instance.ConnectionPool() + primaryDB, err := primaryPool.Connection("postgres") + if err != nil { + return err + } + localDB, err := localPool.Connection("postgres") + if err != nil { + return err + } + contextLog.Trace("Invoked", + "primary", primaryPool.GetDsn("postgres"), + "local", localPool.GetDsn("postgres"), + "podName", sr.instance.GetPodName(), + "config", config) err = synchronizeReplicationSlots( ctx, - infrastructure.NewPostgresManager(primaryPool), - infrastructure.NewPostgresManager(localPool), - sr.instance.PodName, + primaryDB, + localDB, + sr.instance.GetPodName(), config, ) return err @@ -122,25 +145,20 @@ func (sr *Replicator) reconcile(ctx context.Context, config *apiv1.ReplicationSl // nolint: gocognit func synchronizeReplicationSlots( ctx context.Context, - primarySlotManager infrastructure.Manager, - localSlotManager infrastructure.Manager, + primaryDB *sql.DB, + localDB *sql.DB, podName string, config *apiv1.ReplicationSlotsConfiguration, ) error { contextLog := log.FromContext(ctx).WithName("synchronizeReplicationSlots") - contextLog.Trace("Invoked", - "primary", primarySlotManager, - "local", localSlotManager, - "podName", podName, - "config", config) - slotsInPrimary, err := primarySlotManager.List(ctx, config) + slotsInPrimary, err := infrastructure.List(ctx, primaryDB, config) if err != nil { return fmt.Errorf("getting replication slot status from primary: %v", err) } contextLog.Trace("primary slot status", "slotsInPrimary", slotsInPrimary) - slotsInLocal, err := localSlotManager.List(ctx, config) + slotsInLocal, err := infrastructure.List(ctx, localDB, config) if err != nil { return fmt.Errorf("getting replication slot status from local: %v", err) } @@ -167,12 +185,12 @@ func synchronizeReplicationSlots( } if !slotsInLocal.Has(slot.SlotName) { - err := localSlotManager.Create(ctx, slot) + err := infrastructure.Create(ctx, localDB, slot) if err != nil { return err } } - err := localSlotManager.Update(ctx, slot) + err := infrastructure.Update(ctx, localDB, slot) if err != nil { return err } @@ -184,14 +202,14 @@ func synchronizeReplicationSlots( // * slots holding xmin (this can happen on a former primary, and will prevent VACUUM from // removing tuples deleted by any later transaction.) 
if !slotsInPrimary.Has(slot.SlotName) || slot.SlotName == mySlotName || slot.HoldsXmin { - if err := localSlotManager.Delete(ctx, slot); err != nil { + if err := infrastructure.Delete(ctx, localDB, slot); err != nil { return err } } // when the user turns off the feature we should delete all the created replication slots that aren't from HA if !slot.IsHA && !config.SynchronizeReplicas.GetEnabled() { - if err := localSlotManager.Delete(ctx, slot); err != nil { + if err := infrastructure.Delete(ctx, localDB, slot); err != nil { return err } } diff --git a/internal/management/controller/slots/runner/runner_test.go b/internal/management/controller/slots/runner/runner_test.go index de1df33e0b..34c55ab219 100644 --- a/internal/management/controller/slots/runner/runner_test.go +++ b/internal/management/controller/slots/runner/runner_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,14 +13,16 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package runner import ( - "context" - "fmt" + "database/sql" + "github.com/DATA-DOG/go-sqlmock" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -29,150 +32,138 @@ import ( . "github.com/onsi/gomega" ) -type fakeSlot struct { - name string - restartLSN string - holdsXmin bool -} - -type fakeSlotManager struct { - slots map[string]fakeSlot - slotsUpdated int - slotsCreated int - slotsDeleted int -} - -func (sm *fakeSlotManager) List( - _ context.Context, - _ *apiv1.ReplicationSlotsConfiguration, -) (infrastructure.ReplicationSlotList, error) { - var slotList infrastructure.ReplicationSlotList - for _, slot := range sm.slots { - slotList.Items = append(slotList.Items, infrastructure.ReplicationSlot{ - SlotName: slot.name, - RestartLSN: slot.restartLSN, - Type: infrastructure.SlotTypePhysical, - Active: false, - HoldsXmin: slot.holdsXmin, - }) - } - return slotList, nil -} - -func (sm *fakeSlotManager) Update(_ context.Context, slot infrastructure.ReplicationSlot) error { - localSlot, found := sm.slots[slot.SlotName] - if !found { - return fmt.Errorf("while updating slot: Slot %s not found", slot.SlotName) - } - if localSlot.restartLSN != slot.RestartLSN { - sm.slots[slot.SlotName] = fakeSlot{name: slot.SlotName, restartLSN: slot.RestartLSN} - sm.slotsUpdated++ - } - return nil -} - -func (sm *fakeSlotManager) Create(_ context.Context, slot infrastructure.ReplicationSlot) error { - if _, found := sm.slots[slot.SlotName]; found { - return fmt.Errorf("while creating slot: Slot %s already exists", slot.SlotName) - } - sm.slots[slot.SlotName] = fakeSlot{name: slot.SlotName, restartLSN: slot.RestartLSN} - sm.slotsCreated++ - return nil -} - -func (sm *fakeSlotManager) Delete(_ context.Context, slot infrastructure.ReplicationSlot) error { - if _, found := sm.slots[slot.SlotName]; !found { - return fmt.Errorf("while deleting slot: Slot %s not found", slot.SlotName) - } - delete(sm.slots, slot.SlotName) - sm.slotsDeleted++ - return nil -} - -var _ = Describe("Slot synchronization", func() { - localPodName := "cluster-2" - localSlotName := "_cnpg_cluster_2" - 
slot3 := "cluster-3" - slot4 := "cluster-4" - - primary := &fakeSlotManager{ - slots: map[string]fakeSlot{ - localSlotName: {name: localSlotName, restartLSN: "0/301C4D8"}, - slot3: {name: slot3, restartLSN: "0/302C4D8"}, - slot4: {name: slot4, restartLSN: "0/303C4D8"}, - }, - } - local := &fakeSlotManager{ - slots: map[string]fakeSlot{}, - } - config := apiv1.ReplicationSlotsConfiguration{ - HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ - Enabled: ptr.To(true), - SlotPrefix: "_cnpg_", - }, - } +var _ = Describe("Slot synchronization", Ordered, func() { + const ( + selectPgReplicationSlots = "^SELECT (.+) FROM pg_catalog.pg_replication_slots" + selectPgSlotAdvance = "SELECT pg_catalog.pg_replication_slot_advance" + + localPodName = "cluster-2" + localSlotName = "_cnpg_cluster_2" + slot3 = "cluster-3" + slot4 = "cluster-4" + lsnSlot3 = "0/302C4D8" + lsnSlot4 = "0/303C4D8" + ) + + var ( + config = apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(true), + SlotPrefix: "_cnpg_", + }, + } + columns = []string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"} + ) + + var ( + dbLocal, dbPrimary *sql.DB + mockLocal, mockPrimary sqlmock.Sqlmock + ) + + BeforeEach(func() { + var err error + dbLocal, mockLocal, err = sqlmock.New() + Expect(err).NotTo(HaveOccurred()) + dbPrimary, mockPrimary, err = sqlmock.New() + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func() { + Expect(mockLocal.ExpectationsWereMet()).To(Succeed(), "failed expectations in LOCAL") + Expect(mockPrimary.ExpectationsWereMet()).To(Succeed(), "failed expectations in PRIMARY") + }) It("can create slots in local from those on primary", func(ctx SpecContext) { - localSlotsBefore, err := local.List(ctx, &config) - Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsBefore.Items).Should(BeEmpty()) - - err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config) + // the primary contains slots + mockPrimary.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false). + AddRow(slot3, string(infrastructure.SlotTypePhysical), true, lsnSlot3, false). + AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false)) + + // but the local contains none + mockLocal.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns)) + + mockLocal.ExpectExec("SELECT pg_catalog.pg_create_physical_replication_slot"). + WithArgs(slot3, true). + WillReturnResult(sqlmock.NewResult(1, 1)) + + mockLocal.ExpectExec(selectPgSlotAdvance). + WithArgs(slot3, lsnSlot3). + WillReturnResult(sqlmock.NewResult(1, 1)) + + mockLocal.ExpectExec("SELECT pg_catalog.pg_create_physical_replication_slot"). + WithArgs(slot4, true). + WillReturnResult(sqlmock.NewResult(1, 1)) + + mockLocal.ExpectExec(selectPgSlotAdvance). + WithArgs(slot4, lsnSlot4). 
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config)
 Expect(err).ShouldNot(HaveOccurred())
-
- localSlotsAfter, err := local.List(ctx, &config)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(localSlotsAfter.Items).Should(HaveLen(2))
- Expect(localSlotsAfter.Has(slot3)).To(BeTrue())
- Expect(localSlotsAfter.Has(slot4)).To(BeTrue())
- Expect(local.slotsCreated).To(Equal(2))
 })
+
 It("can update slots in local when ReplayLSN in primary advanced", func(ctx SpecContext) {
- // advance slot3 in primary
 newLSN := "0/308C4D8"
- err := primary.Update(ctx, infrastructure.ReplicationSlot{SlotName: slot3, RestartLSN: newLSN})
- Expect(err).ShouldNot(HaveOccurred())
-
- err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
- Expect(err).ShouldNot(HaveOccurred())
- localSlotsAfter, err := local.List(ctx, &config)
+
+ // Simulate advancing slot3 on the primary
+ mockPrimary.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false).
+ AddRow(slot3, string(infrastructure.SlotTypePhysical), true, newLSN, false).
+ AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false))
+ // But local has the old values
+ mockLocal.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(slot3, string(infrastructure.SlotTypePhysical), true, lsnSlot3, false).
+ AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false))
+
+ mockLocal.ExpectExec(selectPgSlotAdvance).
+ WithArgs(slot3, newLSN).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+ mockLocal.ExpectExec(selectPgSlotAdvance).
+ WithArgs(slot4, lsnSlot4).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config)
 Expect(err).ShouldNot(HaveOccurred())
- Expect(localSlotsAfter.Items).Should(HaveLen(2))
- Expect(localSlotsAfter.Has(slot3)).To(BeTrue())
- slot := localSlotsAfter.Get(slot3)
- Expect(slot.RestartLSN).To(Equal(newLSN))
- Expect(local.slotsUpdated).To(Equal(1))
 })
+
- It("can drop slots in local when they are no longer in primary", func(ctx SpecContext) {
- err := primary.Delete(ctx, infrastructure.ReplicationSlot{SlotName: slot4})
- Expect(err).ShouldNot(HaveOccurred())
- err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
- Expect(err).ShouldNot(HaveOccurred())
+ It("can drop inactive slots in local when they are no longer in primary", func(ctx SpecContext) {
+ // Simulate that the primary no longer has slot4
+ mockPrimary.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false))
+ // But local still has it
+ mockLocal.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(slot4, string(infrastructure.SlotTypePhysical), false, lsnSlot4, false))

- localSlotsAfter, err := local.List(ctx, &config)
+ mockLocal.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slot4).
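+ // slot4 is reported inactive locally, so it can be dropped safely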
+ WillReturnResult(sqlmock.NewResult(1, 1)) + + err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config) Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsAfter.Items).Should(HaveLen(1)) - Expect(localSlotsAfter.Has(slot3)).To(BeTrue()) - Expect(local.slotsDeleted).To(Equal(1)) }) + It("can drop slots in local that hold xmin", func(ctx SpecContext) { slotWithXmin := "_cnpg_xmin" - err := primary.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotWithXmin}) - Expect(err).ShouldNot(HaveOccurred()) - local.slots[slotWithXmin] = fakeSlot{name: slotWithXmin, holdsXmin: true} - localSlotsBefore, err := local.List(ctx, &config) - Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsBefore.Has(slotWithXmin)).To(BeTrue()) - - err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config) - Expect(err).ShouldNot(HaveOccurred()) - - localSlotsAfter, err := local.List(ctx, &config) + mockPrimary.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false). + AddRow(slotWithXmin, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", true)) + mockLocal.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false). + AddRow(slotWithXmin, string(infrastructure.SlotTypePhysical), false, "0/301C4D8", true)) // inactive but with Xmin + + mockLocal.ExpectExec(selectPgSlotAdvance).WithArgs(slotWithXmin, "0/301C4D8"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockLocal.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slotWithXmin). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config) Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsAfter.Has(slotWithXmin)).To(BeFalse()) - Expect(localSlotsAfter.Items).Should(HaveLen(1)) - Expect(local.slotsDeleted).To(Equal(2)) }) }) diff --git a/internal/management/controller/slots/runner/suite_test.go b/internal/management/controller/slots/runner/suite_test.go index 330e97976b..313a1f1ac9 100644 --- a/internal/management/controller/slots/runner/suite_test.go +++ b/internal/management/controller/slots/runner/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package runner diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go new file mode 100644 index 0000000000..10f06a7192 --- /dev/null +++ b/internal/management/controller/subscription_controller.go @@ -0,0 +1,234 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package controller
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// SubscriptionReconciler reconciles a Subscription object
+type SubscriptionReconciler struct {
+ client.Client
+ Scheme *runtime.Scheme
+
+ instance *postgres.Instance
+ finalizerReconciler *finalizerReconciler[*apiv1.Subscription]
+ getDB func(name string) (*sql.DB, error)
+ getPostgresMajorVersion func() (int, error)
+}
+
+// subscriptionReconciliationInterval is the time to wait before requeueing
+// a subscription reconciliation, e.g. after a failure
+const subscriptionReconciliationInterval = 30 * time.Second
+
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=subscriptions,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=subscriptions/status,verbs=get;update;patch
+
+// Reconcile is the subscription reconciliation loop
+func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ contextLogger := log.FromContext(ctx).
+ WithName("subscription_reconciler").
+ WithValues("subscriptionName", req.Name)
+
+ // Get the subscription object
+ var subscription apiv1.Subscription
+ if err := r.Get(ctx, client.ObjectKey{
+ Namespace: req.Namespace,
+ Name: req.Name,
+ }, &subscription); err != nil {
+ contextLogger.Trace("Could not fetch Subscription", "error", err)
+ return ctrl.Result{}, client.IgnoreNotFound(err)
+ }
+
+ // This is not for me!
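+ // A Subscription that references a different Cluster is ignored without
+ // requeueing; the instance manager of that cluster is expected to handle it.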
+ if subscription.Spec.ClusterRef.Name != r.instance.GetClusterName() {
+ contextLogger.Trace("Subscription is not for this cluster",
+ "cluster", subscription.Spec.ClusterRef.Name,
+ "expected", r.instance.GetClusterName(),
+ )
+ return ctrl.Result{}, nil
+ }
+
+ // If everything is reconciled, we're done here
+ if subscription.Generation == subscription.Status.ObservedGeneration {
+ return ctrl.Result{}, nil
+ }
+
+ // Fetch the Cluster from the cache
+ cluster, err := r.GetCluster(ctx)
+ if err != nil {
+ return ctrl.Result{}, markAsFailed(ctx, r.Client, &subscription, fmt.Errorf("while fetching the cluster: %w", err))
+ }
+
+ // Still not for me: we're waiting for a switchover
+ if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary {
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ // This is not for me, at least for now
+ if cluster.Status.CurrentPrimary != r.instance.GetPodName() {
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ contextLogger.Info("Reconciling subscription")
+ defer func() {
+ contextLogger.Info("Reconciliation loop of subscription exited")
+ }()
+
+ // Cannot do anything on a replica cluster
+ if cluster.IsReplica() {
+ if err := markAsUnknown(ctx, r.Client, &subscription, errClusterIsReplica); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ if err := r.finalizerReconciler.reconcile(ctx, &subscription); err != nil {
+ return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err)
+ }
+ if !subscription.GetDeletionTimestamp().IsZero() {
+ return ctrl.Result{}, nil
+ }
+
+ // Let's get the connection string
+ connString, err := getSubscriptionConnectionString(
+ cluster,
+ subscription.Spec.ExternalClusterName,
+ subscription.Spec.PublicationDBName,
+ )
+ if err != nil {
+ if markErr := markAsFailed(ctx, r.Client, &subscription, err); markErr != nil {
+ contextLogger.Error(err, "while marking as failed the subscription resource",
+ "error", err,
+ "markError", markErr,
+ )
+ return ctrl.Result{}, fmt.Errorf(
+ "encountered an error while marking as failed the subscription resource: %w, original error: %w",
+ markErr,
+ err)
+ }
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ if res, err := detectConflictingManagers(ctx, r.Client, &subscription, &apiv1.SubscriptionList{}); err != nil ||
+ !res.IsZero() {
+ return res, err
+ }
+
+ if err := r.alignSubscription(ctx, &subscription, connString); err != nil {
+ contextLogger.Error(err, "while reconciling subscription")
+ if markErr := markAsFailed(ctx, r.Client, &subscription, err); markErr != nil {
+ contextLogger.Error(err, "while marking as failed the subscription resource",
+ "error", err,
+ "markError", markErr,
+ )
+ return ctrl.Result{}, fmt.Errorf(
+ "encountered an error while marking as failed the subscription resource: %w, original error: %w",
+ markErr,
+ err)
+ }
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ contextLogger.Info("Reconciliation of subscription completed")
+ if err := markAsReady(ctx, r.Client, &subscription); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+}
+
+func (r *SubscriptionReconciler) evaluateDropSubscription(ctx context.Context, sub *apiv1.Subscription) error {
+ if sub.Spec.ReclaimPolicy != apiv1.SubscriptionReclaimDelete {
+ return nil
+ }
+
+ db, err := 
r.getDB(sub.Spec.DBName) + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) + } + return executeDropSubscription(ctx, db, sub.Spec.Name) +} + +// NewSubscriptionReconciler creates a new subscription reconciler +func NewSubscriptionReconciler( + mgr manager.Manager, + instance *postgres.Instance, +) *SubscriptionReconciler { + sr := &SubscriptionReconciler{ + Client: mgr.GetClient(), + instance: instance, + getDB: func(name string) (*sql.DB, error) { + return instance.ConnectionPool().Connection(name) + }, + getPostgresMajorVersion: func() (int, error) { + version, err := instance.GetPgVersion() + return int(version.Major), err //nolint:gosec + }, + } + sr.finalizerReconciler = newFinalizerReconciler( + mgr.GetClient(), + utils.SubscriptionFinalizerName, + sr.evaluateDropSubscription, + ) + + return sr +} + +// SetupWithManager sets up the controller with the Manager +func (r *SubscriptionReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&apiv1.Subscription{}). + Named("instance-subscription"). + Complete(r) +} + +// GetCluster gets the managed cluster through the client +func (r *SubscriptionReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) { + return getClusterFromInstance(ctx, r.Client, r.instance) +} + +// getSubscriptionConnectionString gets the connection string to be used to connect to +// the specified external cluster, while connected to a pod of the specified cluster +func getSubscriptionConnectionString( + cluster *apiv1.Cluster, + externalClusterName string, + databaseName string, +) (string, error) { + externalCluster, ok := cluster.ExternalCluster(externalClusterName) + if !ok { + return "", fmt.Errorf("externalCluster '%s' not declared in cluster %s", externalClusterName, cluster.Name) + } + + return external.GetServerConnectionString(&externalCluster, databaseName), nil +} diff --git a/internal/management/controller/subscription_controller_sql.go b/internal/management/controller/subscription_controller_sql.go new file mode 100644 index 0000000000..89b255fc6d --- /dev/null +++ b/internal/management/controller/subscription_controller_sql.go @@ -0,0 +1,183 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + + "github.com/jackc/pgx/v5" + "github.com/lib/pq" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +func (r *SubscriptionReconciler) alignSubscription( + ctx context.Context, + obj *apiv1.Subscription, + connString string, +) error { + db, err := r.getDB(obj.Spec.DBName) + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) + } + + row := db.QueryRowContext( + ctx, + ` + SELECT count(*) + FROM pg_catalog.pg_subscription + WHERE subname = $1 + `, + obj.Spec.Name) + if row.Err() != nil { + return fmt.Errorf("while getting subscription status: %w", row.Err()) + } + + var count int + if err := row.Scan(&count); err != nil { + return fmt.Errorf("while getting subscription status (scan): %w", err) + } + + if count > 0 { + if err := r.patchSubscription(ctx, db, obj, connString); err != nil { + return fmt.Errorf("while patching subscription: %w", err) + } + return nil + } + + if err := r.createSubscription(ctx, db, obj, connString); err != nil { + return fmt.Errorf("while creating subscription: %w", err) + } + + return nil +} + +func (r *SubscriptionReconciler) patchSubscription( + ctx context.Context, + db *sql.DB, + obj *apiv1.Subscription, + connString string, +) error { + version, err := r.getPostgresMajorVersion() + if err != nil { + return fmt.Errorf("while getting the PostgreSQL major version: %w", err) + } + sqls := toSubscriptionAlterSQL(obj, connString, version) + for _, sqlQuery := range sqls { + if _, err := db.ExecContext(ctx, sqlQuery); err != nil { + return err + } + } + + return nil +} + +func (r *SubscriptionReconciler) createSubscription( + ctx context.Context, + db *sql.DB, + obj *apiv1.Subscription, + connString string, +) error { + sqlQuery := toSubscriptionCreateSQL(obj, connString) + _, err := db.ExecContext(ctx, sqlQuery) + return err +} + +func toSubscriptionCreateSQL(obj *apiv1.Subscription, connString string) string { + createQuery := fmt.Sprintf( + "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + pgx.Identifier{obj.Spec.PublicationName}.Sanitize(), + ) + if len(obj.Spec.Parameters) > 0 { + createQuery = fmt.Sprintf("%s WITH (%s)", createQuery, toPostgresParameters(obj.Spec.Parameters)) + } + + return createQuery +} + +func toSubscriptionAlterSQL(obj *apiv1.Subscription, connString string, pgMajorVersion int) []string { + result := make([]string, 0, 3) + + setPublicationSQL := fmt.Sprintf( + "ALTER SUBSCRIPTION %s SET PUBLICATION %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + pgx.Identifier{obj.Spec.PublicationName}.Sanitize(), + ) + + setConnStringSQL := fmt.Sprintf( + "ALTER SUBSCRIPTION %s CONNECTION %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + ) + result = append(result, setPublicationSQL, setConnStringSQL) + + if len(obj.Spec.Parameters) > 0 { + result = append(result, + fmt.Sprintf( + "ALTER SUBSCRIPTION %s SET (%s)", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + toPostgresParameters(filterSubscriptionUpdatableParameters(obj.Spec.Parameters, pgMajorVersion)), + ), + ) + } + + return result +} + +func filterSubscriptionUpdatableParameters(parameters map[string]string, pgMajorVersion int) map[string]string { + // Only a limited set of the parameters can be updated + // see https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + allowedParameters 
:= []string{ + "slot_name", + "synchronous_commit", + "binary", + "streaming", + "disable_on_error", + "password_required", + "run_as_owner", + "origin", + "failover", + } + if pgMajorVersion >= 18 { + allowedParameters = append(allowedParameters, "two_phase") + } + filteredParameters := make(map[string]string, len(parameters)) + for _, key := range allowedParameters { + if _, present := parameters[key]; present { + filteredParameters[key] = parameters[key] + } + } + return filteredParameters +} + +func executeDropSubscription(ctx context.Context, db *sql.DB, name string) error { + if _, err := db.ExecContext( + ctx, + fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{name}.Sanitize()), + ); err != nil { + return fmt.Errorf("while dropping subscription: %w", err) + } + + return nil +} diff --git a/internal/management/controller/subscription_controller_sql_test.go b/internal/management/controller/subscription_controller_sql_test.go new file mode 100644 index 0000000000..2678e75c54 --- /dev/null +++ b/internal/management/controller/subscription_controller_sql_test.go @@ -0,0 +1,198 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// nolint: dupl +package controller + +import ( + "database/sql" + "fmt" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/jackc/pgx/v5" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// nolint: dupl +var _ = Describe("subscription sql", func() { + const defaultPostgresMajorVersion = 17 + + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + ) + + BeforeEach(func() { + var err error + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + It("drops the subscription successfully", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"subscription_name"}.Sanitize())). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := executeDropSubscription(ctx, db, "subscription_name") + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns an error when dropping the subscription fails", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"subscription_name"}.Sanitize())). + WillReturnError(fmt.Errorf("drop subscription error")) + + err := executeDropSubscription(ctx, db, "subscription_name") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("while dropping subscription: drop subscription error")) + }) + + It("sanitizes the subscription name correctly", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"sanitized_name"}.Sanitize())). 
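+ // pgx.Identifier.Sanitize() double-quotes the identifier, so the matcher
+ // sees exactly: DROP SUBSCRIPTION IF EXISTS "sanitized_name"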
+ WillReturnResult(sqlmock.NewResult(1, 1)) + + err := executeDropSubscription(ctx, db, "sanitized_name") + Expect(err).ToNot(HaveOccurred()) + }) + + It("generates correct SQL for creating subscription with publication and connection string", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + }, + } + connString := "host=localhost user=test dbname=test" + + sql := toSubscriptionCreateSQL(obj, connString) + Expect(sql).To(Equal( + `CREATE SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test' PUBLICATION "test_pub"`)) + }) + + It("generates correct SQL for creating subscription with parameters", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + Parameters: map[string]string{ + "param1": "value1", + "param2": "value2", + }, + }, + } + connString := "host=localhost user=test dbname=test" + + sql := toSubscriptionCreateSQL(obj, connString) + expectedElement := `CREATE SUBSCRIPTION "test_sub" ` + + `CONNECTION 'host=localhost user=test dbname=test' ` + + `PUBLICATION "test_pub" WITH ("param1" = 'value1', "param2" = 'value2')` + Expect(sql).To(Equal(expectedElement)) + }) + + It("returns correct SQL for creating subscription with no owner or parameters", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + }, + } + connString := "host=localhost user=test dbname=test" + + sql := toSubscriptionCreateSQL(obj, connString) + Expect(sql).To(Equal( + `CREATE SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test' PUBLICATION "test_pub"`)) + }) + + It("generates correct SQL for altering subscription with publication and connection string", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + }, + } + connString := "host=localhost user=test dbname=test" + + sqls := toSubscriptionAlterSQL(obj, connString, defaultPostgresMajorVersion) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) + }) + + It("generates correct SQL for altering subscription with parameters for PostgreSQL 17", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + Parameters: map[string]string{ + "copy_data": "true", + "origin": "none", + "failover": "true", + "two_phase": "true", + }, + }, + } + connString := "host=localhost user=test dbname=test" + + sqls := toSubscriptionAlterSQL(obj, connString, 17) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET ("failover" = 'true', "origin" = 'none')`)) + }) + + It("generates correct SQL for altering subscription with parameters for PostgreSQL 18", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + Parameters: map[string]string{ + "copy_data": "true", + "origin": "none", + "failover": "true", + "two_phase": "true", + }, + }, + } + connString := "host=localhost user=test dbname=test" + + sqls := toSubscriptionAlterSQL(obj, connString, 18) + 
Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) + Expect(sqls).To(ContainElement( + `ALTER SUBSCRIPTION "test_sub" SET ("failover" = 'true', "origin" = 'none', "two_phase" = 'true')`)) + }) + + It("returns correct SQL for altering subscription with no owner or parameters", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + }, + } + connString := "host=localhost user=test dbname=test" + + sqls := toSubscriptionAlterSQL(obj, connString, defaultPostgresMajorVersion) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) + }) +}) diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go new file mode 100644 index 0000000000..343dd4e4b8 --- /dev/null +++ b/internal/management/controller/subscription_controller_test.go @@ -0,0 +1,405 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/jackc/pgx/v5" + "github.com/lib/pq" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +const subscriptionDetectionQuery = `SELECT count(*) + FROM pg_catalog.pg_subscription + WHERE subname = $1` + +var _ = Describe("Managed subscription controller tests", func() { + const defaultPostgresMajorVersion = 17 + + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + subscription *apiv1.Subscription + cluster *apiv1.Cluster + r *SubscriptionReconciler + fakeClient client.Client + connString string + err error + ) + + BeforeEach(func() { + cluster = &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Status: apiv1.ClusterStatus{ + CurrentPrimary: "cluster-example-1", + TargetPrimary: "cluster-example-1", + }, + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "cluster-other", + ConnectionParameters: map[string]string{ + "host": "localhost", + }, + }, + }, + }, + } + subscription = &apiv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sub-one", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.SubscriptionSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + ReclaimPolicy: apiv1.SubscriptionReclaimDelete, + Name: "sub-one", + DBName: "app", + PublicationName: "pub-all", + PublicationDBName: "app", + ExternalClusterName: "cluster-other", + }, + } + connString, err = getSubscriptionConnectionString(cluster, "cluster-other", "app") + Expect(err).ToNot(HaveOccurred()) + + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-example-1"). + WithClusterName("cluster-example") + + fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithObjects(cluster, subscription). + WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Subscription{}). + Build() + + r = &SubscriptionReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: pgInstance, + getDB: func(_ string) (*sql.DB, error) { + return db, nil + }, + getPostgresMajorVersion: func() (int, error) { + return defaultPostgresMajorVersion, nil + }, + } + r.finalizerReconciler = newFinalizerReconciler( + fakeClient, + utils.SubscriptionFinalizerName, + r.evaluateDropSubscription, + ) + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + It("adds finalizer and sets status ready on success", func(ctx SpecContext) { + noHits := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name). 
+ WillReturnRows(noHits) + + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: subscription.GetNamespace(), + Name: subscription.GetName(), + }}) + Expect(err).ToNot(HaveOccurred()) + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: subscription.GetNamespace(), + Name: subscription.GetName(), + }, subscription) + Expect(err).ToNot(HaveOccurred()) + + Expect(subscription.Status.Applied).Should(HaveValue(BeTrue())) + Expect(subscription.GetStatusMessage()).Should(BeEmpty()) + Expect(subscription.GetFinalizers()).NotTo(BeEmpty()) + }) + + It("subscription object inherits error after patching", func(ctx SpecContext) { + expectedError := fmt.Errorf("no permission") + oneHit := sqlmock.NewRows([]string{""}).AddRow("1") + dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name). + WillReturnRows(oneHit) + + expectedQuery := fmt.Sprintf("ALTER SUBSCRIPTION %s SET PUBLICATION %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).ToNot(HaveOccurred()) + + Expect(subscription.Status.Applied).Should(HaveValue(BeFalse())) + Expect(subscription.Status.Message).Should(ContainSubstring(expectedError.Error())) + }) + + When("reclaim policy is delete", func() { + It("on deletion it removes finalizers and drops the subscription", func(ctx SpecContext) { + // Mocking detection of subscriptions + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name). + WillReturnRows(expectedValue) + + // Mocking create subscription + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + // Mocking Drop subscription + expectedDrop := fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1)) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(subscription.GetFinalizers()).NotTo(BeEmpty()) + Expect(subscription.Status.Applied).Should(HaveValue(BeTrue())) + Expect(subscription.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. 
+ // See fake.Client known issues with `Generation`
+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+ subscription.SetGeneration(subscription.GetGeneration() + 1)
+ Expect(fakeClient.Update(ctx, subscription)).To(Succeed())
+
+ // We now look at the behavior when we delete the Subscription object
+ Expect(fakeClient.Delete(ctx, subscription)).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).To(HaveOccurred())
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ })
+ })
+
+ When("reclaim policy is retain", func() {
+ It("on deletion it removes finalizers and does NOT drop the subscription", func(ctx SpecContext) {
+ subscription.Spec.ReclaimPolicy = apiv1.SubscriptionReclaimRetain
+ Expect(fakeClient.Update(ctx, subscription)).To(Succeed())
+
+ // Mocking Detect subscription
+ expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name).
+ WillReturnRows(expectedValue)
+
+ // Mocking Create subscription
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s",
+ pgx.Identifier{subscription.Spec.Name}.Sanitize(),
+ pq.QuoteLiteral(connString),
+ pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Plain successful reconciliation, finalizers have been created
+ Expect(subscription.GetFinalizers()).NotTo(BeEmpty())
+ Expect(subscription.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(subscription.Status.Message).Should(BeEmpty())
+
+ // The next 2 lines are a hacky bit to make sure the next reconciler
+ // call doesn't skip on account of Generation == ObservedGeneration.
+ // See fake.Client known issues with `Generation`
+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+ subscription.SetGeneration(subscription.GetGeneration() + 1)
+ Expect(fakeClient.Update(ctx, subscription)).To(Succeed())
+
+ // We now look at the behavior when we delete the Subscription object
+ Expect(fakeClient.Delete(ctx, subscription)).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).To(HaveOccurred())
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ })
+ })
+
+ It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) {
+ // Since the fakeClient has the `cluster-example` cluster, let's reference
+ // another cluster `cluster-other` that is not found by the fakeClient
+ pgInstance := postgres.NewInstance().
+ WithNamespace("default").
+ WithPodName("cluster-other-1").
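+ // point the instance at a cluster name the fake client does not know about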
+ WithClusterName("cluster-other")
+
+ r = &SubscriptionReconciler{
+ Client: fakeClient,
+ Scheme: schemeBuilder.BuildWithAllKnownScheme(),
+ instance: pgInstance,
+ getDB: func(_ string) (*sql.DB, error) {
+ return db, nil
+ },
+ }
+
+ // Update the subscription object to reference the nonexistent Cluster
+ subscription.Spec.ClusterRef.Name = "cluster-other"
+ Expect(fakeClient.Update(ctx, subscription)).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(subscription.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(subscription.Status.Message).Should(ContainSubstring(
+ fmt.Sprintf("%q not found", subscription.Spec.ClusterRef.Name)))
+ })
+
+ It("skips reconciliation if subscription object isn't found (deleted subscription)", func(ctx SpecContext) {
+ // Initialize a new subscription but without creating it in the K8S Cluster
+ otherSubscription := &apiv1.Subscription{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sub-other",
+ Namespace: "default",
+ Generation: 1,
+ },
+ Spec: apiv1.SubscriptionSpec{
+ ClusterRef: corev1.LocalObjectReference{
+ Name: cluster.Name,
+ },
+ Name: "sub-one",
+ },
+ }
+
+ // Reconcile the subscription that hasn't been created in the K8S Cluster
+ result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+ Namespace: otherSubscription.Namespace,
+ Name: otherSubscription.Name,
+ }})
+
+ // Expect the reconciler to exit silently, since the object doesn't exist
+ Expect(err).ToNot(HaveOccurred())
+ Expect(result).Should(BeZero()) // nothing to do, since the subscription object doesn't exist
+ })
+
+ It("marks as failed if the target subscription is already being managed", func(ctx SpecContext) {
+ // Let's force the subscription to have a past reconciliation
+ subscription.Status.ObservedGeneration = 2
+ Expect(fakeClient.Status().Update(ctx, subscription)).To(Succeed())
+
+ // A new Subscription object targeting the same "sub-one"
+ subDuplicate := &apiv1.Subscription{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sub-duplicate",
+ Namespace: "default",
+ Generation: 1,
+ },
+ Spec: apiv1.SubscriptionSpec{
+ ClusterRef: corev1.LocalObjectReference{
+ Name: cluster.Name,
+ },
+ Name: "sub-one",
+ PublicationName: "pub-all",
+ PublicationDBName: "app",
+ ExternalClusterName: "cluster-other",
+ },
+ }
+
+ Expect(fakeClient.Create(ctx, subDuplicate)).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subDuplicate)
+ Expect(err).ToNot(HaveOccurred())
+
+ expectedError := fmt.Sprintf("%q is already managed by object %q",
+ subDuplicate.Spec.Name, subscription.Name)
+ Expect(subDuplicate.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(subDuplicate.Status.Message).Should(ContainSubstring(expectedError))
+ })
+
+ It("properly signals a subscription is on a replica cluster", func(ctx SpecContext) {
+ initialCluster := cluster.DeepCopy()
+ cluster.Spec.ReplicaCluster = &apiv1.ReplicaClusterConfiguration{
+ Enabled: ptr.To(true),
+ }
+ Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(subscription.Status.Applied).Should(BeNil())
+ Expect(subscription.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary"))
+ })
+})
+
+func reconcileSubscription(
+ ctx context.Context,
+ fakeClient client.Client,
+ r 
*SubscriptionReconciler, + subscription *apiv1.Subscription, +) error { + GinkgoT().Helper() + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: subscription.GetNamespace(), + Name: subscription.GetName(), + }}) + Expect(err).ToNot(HaveOccurred()) + return fakeClient.Get(ctx, client.ObjectKey{ + Namespace: subscription.GetNamespace(), + Name: subscription.GetName(), + }, subscription) +} diff --git a/internal/management/controller/suite_test.go b/internal/management/controller/suite_test.go index c9e4c918ec..dc97682f30 100644 --- a/internal/management/controller/suite_test.go +++ b/internal/management/controller/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/management/controller/tablespaces/actions.go b/internal/management/controller/tablespaces/actions.go index d886dc4968..7df6d9d580 100644 --- a/internal/management/controller/tablespaces/actions.go +++ b/internal/management/controller/tablespaces/actions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package tablespaces import ( "context" + "database/sql" "github.com/cloudnative-pg/machinery/pkg/log" @@ -27,7 +31,7 @@ import ( type tablespaceReconcilerStep interface { execute(ctx context.Context, - tbsManager infrastructure.TablespaceManager, + db *sql.DB, tbsStorageManager tablespaceStorageManager, ) apiv1.TablespaceState } @@ -38,7 +42,7 @@ type createTablespaceAction struct { func (r *createTablespaceAction) execute( ctx context.Context, - tbsManager infrastructure.TablespaceManager, + db *sql.DB, tbsStorageManager tablespaceStorageManager, ) apiv1.TablespaceState { contextLog := log.FromContext(ctx).WithName("tbs_create_reconciler") @@ -59,7 +63,7 @@ func (r *createTablespaceAction) execute( Name: r.tablespace.Name, Owner: r.tablespace.Owner.Name, } - err := tbsManager.Create(ctx, tablespace) + err := infrastructure.Create(ctx, db, tablespace) if err != nil { contextLog.Error(err, "while performing action", "tablespace", r.tablespace.Name) return apiv1.TablespaceState{ @@ -83,7 +87,7 @@ type updateTablespaceAction struct { func (r *updateTablespaceAction) execute( ctx context.Context, - tbsManager infrastructure.TablespaceManager, + db *sql.DB, _ tablespaceStorageManager, ) apiv1.TablespaceState { contextLog := log.FromContext(ctx).WithName("tbs_update_reconciler") @@ -93,7 +97,7 @@ func (r *updateTablespaceAction) execute( Name: r.tablespace.Name, Owner: r.tablespace.Owner.Name, } - err := tbsManager.Update(ctx, tablespace) + err := infrastructure.Update(ctx, db, tablespace) if err != nil { contextLog.Error( err, "while performing action", @@ -119,7 +123,7 @@ type noopTablespaceAction struct { func (r *noopTablespaceAction) execute( _ context.Context, - _ infrastructure.TablespaceManager, + _ *sql.DB, _ tablespaceStorageManager, ) apiv1.TablespaceState { return apiv1.TablespaceState{ diff --git a/internal/management/controller/tablespaces/controller_test.go b/internal/management/controller/tablespaces/controller_test.go index 01cc58234a..378c8d6d61 100644 --- a/internal/management/controller/tablespaces/controller_test.go +++ b/internal/management/controller/tablespaces/controller_test.go @@ -1,69 +1,47 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
*/

package tablespaces

import (
 "context"
+ "database/sql"
+ "errors"
 "fmt"
 "slices"

+ "github.com/DATA-DOG/go-sqlmock"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
 apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/tablespaces/infrastructure"
+ schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
 "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"

 . "github.com/onsi/ginkgo/v2"
 . "github.com/onsi/gomega"
)

-type mockTablespaceManager struct {
- tablespaces map[string]infrastructure.Tablespace
- callHistory []string
-}
-
-func (m *mockTablespaceManager) List(_ context.Context) ([]infrastructure.Tablespace, error) {
- m.callHistory = append(m.callHistory, "list")
- re := make([]infrastructure.Tablespace, len(m.tablespaces))
- i := 0
- for _, r := range m.tablespaces {
- re[i] = r
- i++
- }
- return re, nil
-}
-
-func (m *mockTablespaceManager) Update(
- _ context.Context, _ infrastructure.Tablespace,
-) error {
- m.callHistory = append(m.callHistory, "update")
- return nil
-}
-
-func (m *mockTablespaceManager) Create(
- _ context.Context, tablespace infrastructure.Tablespace,
-) error {
- m.callHistory = append(m.callHistory, "create")
- _, found := m.tablespaces[tablespace.Name]
- if found {
- return fmt.Errorf("trying to create existing tablespace: %s", tablespace.Name)
- }
- m.tablespaces[tablespace.Name] = tablespace
- return nil
-}
-
+// mockTablespaceStorageManager is a storage manager where storage exists by
+// default unless explicitly marked as unavailable
 type mockTablespaceStorageManager struct {
 unavailableStorageLocations []string
 }
@@ -79,158 +57,314 @@ func (mst mockTablespaceStorageManager) getStorageLocation(tablespaceName string
 return fmt.Sprintf("/%s", tablespaceName)
 }

-var _ = Describe("Tablespace synchronizer tests", func() {
- tablespaceReconciler := TablespaceReconciler{
- instance: &postgres.Instance{
- Namespace: "myPod",
+type fakeInstance struct {
+ *postgres.Instance
+ db *sql.DB
+}
+
+func (f fakeInstance) GetSuperUserDB() (*sql.DB, error) {
+ return f.db, nil
+}
+
+func (f fakeInstance) CanCheckReadiness() bool {
+ return true
+}
+
+func (f fakeInstance) IsPrimary() (bool, error) {
+ return true, nil
+}
+
+func (f fakeInstance) IsReady() error {
+ return nil
+}
+
+const (
+ expectedListStmt = `
+ SELECT
+ pg_tablespace.spcname spcname,
+ COALESCE(pg_roles.rolname, '') rolname
+ FROM pg_catalog.pg_tablespace
+ LEFT JOIN pg_catalog.pg_roles ON pg_tablespace.spcowner = pg_roles.oid
+ WHERE spcname NOT LIKE $1
+ `
+ expectedCreateStmt = "CREATE TABLESPACE \"%s\" OWNER \"%s\" " +
+ "LOCATION '%s'"
+
+ expectedUpdateStmt = "ALTER TABLESPACE \"%s\" OWNER TO \"%s\""
+)
+
+func getCluster(ctx context.Context, c client.Client, cluster *apiv1.Cluster) (*apiv1.Cluster, error) {
+ var updatedCluster apiv1.Cluster
+ err := c.Get(ctx, client.ObjectKey{
+ Namespace: cluster.Namespace,
+ Name: cluster.Name,
+ }, &updatedCluster)
+ return &updatedCluster, err
+}
+
+// tablespaceTest represents all the variable bits that go into a test of the
+// tablespace reconciler
+type tablespaceTest struct {
+ tablespacesInSpec []apiv1.TablespaceConfiguration
+ postgresExpectations func(sqlmock.Sqlmock)
+ shouldRequeue bool
+ storageManager tablespaceStorageManager
+ 
expectedTablespaceStatus []apiv1.TablespaceState +} + +// assertTablespaceReconciled is the full test, going from setting up the mocks +// and the cluster to verifying all expectations are met +func assertTablespaceReconciled(ctx context.Context, tt tablespaceTest) { + db, dbMock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), sqlmock.MonitorPingsOption(true)) + Expect(err).ToNot(HaveOccurred()) + + DeferCleanup(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", }, } + cluster.Spec.Tablespaces = tt.tablespacesInSpec + + fakeClient := fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithObjects(cluster). + WithStatusSubresource(&apiv1.Cluster{}). + Build() + + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithClusterName("cluster-example") + + instance := fakeInstance{ + Instance: pgInstance, + db: db, + } + + tablespaceReconciler := TablespaceReconciler{ + instance: &instance, + client: fakeClient, + storageManager: tt.storageManager, + } + + tt.postgresExpectations(dbMock) + + results, err := tablespaceReconciler.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + if tt.shouldRequeue { + Expect(results).NotTo(BeZero()) + } else { + Expect(results).To(BeZero()) + } + + updatedCluster, err := getCluster(ctx, fakeClient, cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Status.TablespacesStatus).To(Equal(tt.expectedTablespaceStatus)) +} +var _ = Describe("Tablespace synchronizer tests", func() { When("tablespace configurations are realizable", func() { It("will do nothing if the DB contains the tablespaces in spec", func(ctx context.Context) { - tablespacesSpec := []apiv1.TablespaceConfiguration{ - { - Name: "foo", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", - }, - Owner: apiv1.DatabaseRoleRef{ - Name: "app", + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { + Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + Owner: apiv1.DatabaseRoleRef{ + Name: "app", + }, }, }, - } - tbsManager := mockTablespaceManager{ - tablespaces: map[string]infrastructure.Tablespace{ - "foo": { + postgresExpectations: func(mock sqlmock.Sqlmock) { + // we expect the reconciler to list the tablespaces on the DB + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}). 
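+ // the DB already reports tablespace "foo" owned by "app", matching the spec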
+ AddRow("foo", "app") + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + }, + shouldRequeue: false, + expectedTablespaceStatus: []apiv1.TablespaceState{ + { Name: "foo", Owner: "app", + State: "reconciled", }, }, - } - tbsInDatabase, err := tbsManager.List(ctx) - Expect(err).ShouldNot(HaveOccurred()) - tbsSteps := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec) - result := tablespaceReconciler.applySteps(ctx, &tbsManager, - mockTablespaceStorageManager{}, tbsSteps) - Expect(result).To(ConsistOf(apiv1.TablespaceState{ - Name: "foo", - Owner: "app", - State: apiv1.TablespaceStatusReconciled, - Error: "", - })) - Expect(tbsManager.callHistory).To(HaveLen(1)) - Expect(tbsManager.callHistory).To(ConsistOf("list")) + }) }) It("will change the owner when needed", func(ctx context.Context) { - tablespacesSpec := []apiv1.TablespaceConfiguration{ - { - Name: "foo", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", - }, - Owner: apiv1.DatabaseRoleRef{ - Name: "new_user", + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { + Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + Owner: apiv1.DatabaseRoleRef{ + Name: "new_user", + }, }, }, - } - tbsManager := mockTablespaceManager{ - tablespaces: map[string]infrastructure.Tablespace{ - "foo": { + postgresExpectations: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}). + AddRow("foo", "app") + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + stmt := fmt.Sprintf(expectedUpdateStmt, "foo", "new_user") + mock.ExpectExec(stmt). + WillReturnResult(sqlmock.NewResult(2, 1)) + }, + shouldRequeue: false, + expectedTablespaceStatus: []apiv1.TablespaceState{ + { Name: "foo", - Owner: "app", + Owner: "new_user", + State: "reconciled", }, }, - } - tbsInDatabase, err := tbsManager.List(ctx) - Expect(err).ShouldNot(HaveOccurred()) - tbsByAction := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec) - result := tablespaceReconciler.applySteps(ctx, &tbsManager, - mockTablespaceStorageManager{}, tbsByAction) - Expect(result).To(ConsistOf( - apiv1.TablespaceState{ - Name: "foo", - Owner: "new_user", - State: apiv1.TablespaceStatusReconciled, - Error: "", - }, - )) - Expect(tbsManager.callHistory).To(HaveLen(2)) - Expect(tbsManager.callHistory).To(ConsistOf("list", "update")) + }) }) - It("will create a tablespace in spec that is missing from DB", func(ctx context.Context) { - tablespacesSpec := []apiv1.TablespaceConfiguration{ - { - Name: "foo", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", + It("will create a tablespace in spec that is missing from DB if mount point exists", func(ctx context.Context) { + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { + Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + }, + { + Name: "bar", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + Owner: apiv1.DatabaseRoleRef{ + Name: "new_user", + }, }, }, - { - Name: "bar", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", + postgresExpectations: func(mock sqlmock.Sqlmock) { + // we expect the reconciler to list the tablespaces on DB, and to + // create a new tablespace + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}). 
+ AddRow("foo", "") + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + stmt := fmt.Sprintf(expectedCreateStmt, "bar", "new_user", "/var/lib/postgresql/tablespaces/bar/data") + mock.ExpectExec(stmt). + WillReturnResult(sqlmock.NewResult(2, 1)) + }, + shouldRequeue: false, + storageManager: mockTablespaceStorageManager{ + unavailableStorageLocations: []string{ + "/foo", }, }, - } - tbsManager := mockTablespaceManager{ - tablespaces: map[string]infrastructure.Tablespace{ - "foo": { + expectedTablespaceStatus: []apiv1.TablespaceState{ + { + Name: "foo", + Owner: "", + State: "reconciled", + }, + { + Name: "bar", + Owner: "new_user", + State: "reconciled", + }, + }, + }) + }) + + It("will mark tablespace status as pending with error when the DB CREATE fails", func(ctx context.Context) { + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, }, + { + Name: "bar", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + Owner: apiv1.DatabaseRoleRef{ + Name: "new_user", + }, + }, + }, + postgresExpectations: func(mock sqlmock.Sqlmock) { + // we expect the reconciler to list the tablespaces on DB, and to + // create a new tablespace + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}). + AddRow("foo", "") + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + // we simulate DB command failure + stmt := fmt.Sprintf(expectedCreateStmt, "bar", "new_user", "/var/lib/postgresql/tablespaces/bar/data") + mock.ExpectExec(stmt). + WillReturnError(errors.New("boom")) }, - } - tbsInDatabase, err := tbsManager.List(ctx) - Expect(err).ShouldNot(HaveOccurred()) - tbsSteps := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec) - result := tablespaceReconciler.applySteps(ctx, &tbsManager, - mockTablespaceStorageManager{}, tbsSteps) - Expect(result).To(ConsistOf( - apiv1.TablespaceState{ - Name: "foo", - Owner: "", - State: apiv1.TablespaceStatusReconciled, - }, - apiv1.TablespaceState{ - Name: "bar", - Owner: "", - State: apiv1.TablespaceStatusReconciled, - }, - )) - Expect(tbsManager.callHistory).To(HaveLen(2)) - Expect(tbsManager.callHistory).To(ConsistOf("list", "create")) + shouldRequeue: true, + storageManager: mockTablespaceStorageManager{ + unavailableStorageLocations: []string{ + "/foo", + }, + }, + expectedTablespaceStatus: []apiv1.TablespaceState{ + { + Name: "foo", + Owner: "", + State: "reconciled", + }, + { + Name: "bar", + Owner: "new_user", + State: "pending", + Error: "while creating tablespace bar: boom", + }, + }, + }) }) It("will requeue the tablespace creation if the mount path doesn't exist", func(ctx context.Context) { - tablespacesSpec := []apiv1.TablespaceConfiguration{ - { - Name: "foo", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", - }, - }, - } - tbsManager := mockTablespaceManager{} - tbsInDatabase, err := tbsManager.List(ctx) - Expect(err).ShouldNot(HaveOccurred()) - tbsByAction := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec) - result := tablespaceReconciler.applySteps(ctx, &tbsManager, - mockTablespaceStorageManager{ + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { + Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + }, + }, + postgresExpectations: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}) + 
mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + }, + shouldRequeue: true, + storageManager: mockTablespaceStorageManager{ unavailableStorageLocations: []string{ "/foo", }, - }, tbsByAction) - Expect(result).To(ConsistOf( - apiv1.TablespaceState{ - Name: "foo", - Owner: "", - State: apiv1.TablespaceStatusPendingReconciliation, - Error: "deferred until mount point is created", - }, - )) - Expect(tbsManager.callHistory).To(HaveLen(1)) - Expect(tbsManager.callHistory).To(ConsistOf("list")) + }, + expectedTablespaceStatus: []apiv1.TablespaceState{ + { + Name: "foo", + Owner: "", + State: "pending", + Error: "deferred until mount point is created", + }, + }, + }) }) }) }) diff --git a/internal/management/controller/tablespaces/doc.go b/internal/management/controller/tablespaces/doc.go index 735eab3ebb..d1d885ad9c 100644 --- a/internal/management/controller/tablespaces/doc.go +++ b/internal/management/controller/tablespaces/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package tablespaces contains the runner to declarative tablespace diff --git a/internal/management/controller/tablespaces/infrastructure/contract.go b/internal/management/controller/tablespaces/infrastructure/contract.go index 398e277849..0538358da0 100644 --- a/internal/management/controller/tablespaces/infrastructure/contract.go +++ b/internal/management/controller/tablespaces/infrastructure/contract.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
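The specs above now funnel through an assertTablespaceReconciled helper driven by a tablespaceTest fixture, both introduced elsewhere in this change. A minimal sketch of the shape the call sites imply, assuming the fields are exactly the ones used above (an inference, not the actual definition):

```go
// Sketch only: the real fixture and helper live in this package's test files.
type tablespaceTest struct {
	tablespacesInSpec        []apiv1.TablespaceConfiguration
	postgresExpectations     func(sqlmock.Sqlmock)
	storageManager           mockTablespaceStorageManager
	shouldRequeue            bool
	expectedTablespaceStatus []apiv1.TablespaceState
}

func assertTablespaceReconciled(ctx context.Context, tt tablespaceTest) {
	// Mock the superuser connection and register the expected SQL traffic
	db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
	Expect(err).ToNot(HaveOccurred())
	tt.postgresExpectations(mock)

	// List through the mock, plan the next steps, and apply them
	// against the fake storage manager
	tbsInDatabase, err := infrastructure.List(ctx, db)
	Expect(err).ToNot(HaveOccurred())
	steps := evaluateNextSteps(ctx, tbsInDatabase, tt.tablespacesInSpec)
	reconciler := TablespaceReconciler{storageManager: tt.storageManager}
	result := reconciler.applySteps(ctx, db, steps)

	Expect(result).To(ConsistOf(tt.expectedTablespaceStatus))
	Expect(mock.ExpectationsWereMet()).To(Succeed())

	// A pending tablespace is what makes the reconciler ask for a requeue
	requeue := false
	for _, state := range result {
		if state.State == apiv1.TablespaceStatusPendingReconciliation {
			requeue = true
		}
	}
	Expect(requeue).To(Equal(tt.shouldRequeue))
}
```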
+ +SPDX-License-Identifier: Apache-2.0 */ package infrastructure -import "context" - // Tablespace represents the tablespace information read from / written to the Database type Tablespace struct { // Name is the name of the tablespace @@ -26,15 +27,3 @@ type Tablespace struct { // Owner is the owner of this tablespace Owner string `json:"owner"` } - -// TablespaceManager abstracts the functionality of reconciling with PostgreSQL tablespaces -type TablespaceManager interface { - // List the tablespace in the database - List(ctx context.Context) ([]Tablespace, error) - - // Create the tablespace in the database - Create(ctx context.Context, tablespace Tablespace) error - - // Update the tablespace in the database (change ownership) - Update(ctx context.Context, tablespace Tablespace) error -} diff --git a/internal/management/controller/tablespaces/infrastructure/doc.go b/internal/management/controller/tablespaces/infrastructure/doc.go index 1ecfb68d7a..32c3049a2f 100644 --- a/internal/management/controller/tablespaces/infrastructure/doc.go +++ b/internal/management/controller/tablespaces/infrastructure/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package infrastructure contains the structs and interfaces needed to manage declarative tablespace diff --git a/internal/management/controller/tablespaces/infrastructure/postgres.go b/internal/management/controller/tablespaces/infrastructure/postgres.go index 6b01c8184a..8b60421941 100644 --- a/internal/management/controller/tablespaces/infrastructure/postgres.go +++ b/internal/management/controller/tablespaces/infrastructure/postgres.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package infrastructure @@ -28,38 +31,21 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) -// postgresTablespaceManager is a TablespaceManager for a database instance -type postgresTablespaceManager struct { - superUserDB *sql.DB -} - -// NewPostgresTablespaceManager returns an implementation of TablespaceManager for postgres -func NewPostgresTablespaceManager(superDB *sql.DB) TablespaceManager { - return newPostgresTablespaceManager(superDB) -} - -// NewPostgresTablespaceManager returns an implementation of TablespaceManager for postgres -func newPostgresTablespaceManager(superDB *sql.DB) postgresTablespaceManager { - return postgresTablespaceManager{ - superUserDB: superDB, - } -} - // List the tablespaces in the database // The content exclude pg_default and pg_global database -func (tbsMgr postgresTablespaceManager) List(ctx context.Context) ([]Tablespace, error) { +func List(ctx context.Context, db *sql.DB) ([]Tablespace, error) { logger := log.FromContext(ctx).WithName("tbs_reconciler_list") logger.Trace("Invoked list") wrapErr := func(err error) error { return fmt.Errorf("while listing DB tablespaces: %w", err) } - rows, err := tbsMgr.superUserDB.QueryContext( + rows, err := db.QueryContext( ctx, ` SELECT pg_tablespace.spcname spcname, COALESCE(pg_roles.rolname, '') rolname - FROM pg_tablespace - LEFT JOIN pg_roles ON pg_tablespace.spcowner = pg_roles.oid + FROM pg_catalog.pg_tablespace + LEFT JOIN pg_catalog.pg_roles ON pg_tablespace.spcowner = pg_roles.oid WHERE spcname NOT LIKE $1 `, postgres.SystemTablespacesPrefix, @@ -93,7 +79,7 @@ func (tbsMgr postgresTablespaceManager) List(ctx context.Context) ([]Tablespace, } // Create the tablespace in the database, if tablespace is temporary tablespace, need reload configure -func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespace) error { +func Create(ctx context.Context, db *sql.DB, tbs Tablespace) error { contextLog := log.FromContext(ctx).WithName("tbs_reconciler_create") tablespaceLocation := specs.LocationForTablespace(tbs.Name) @@ -104,7 +90,7 @@ func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespa return fmt.Errorf("while creating tablespace %s: %w", tbs.Name, err) } var err error - if _, err = tbsMgr.superUserDB.ExecContext( + if _, err = db.ExecContext( ctx, fmt.Sprintf( "CREATE TABLESPACE %s OWNER %s LOCATION '%s'", @@ -119,7 +105,7 @@ func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespa } // Update the tablespace in the database (change ownership) -func (tbsMgr postgresTablespaceManager) Update(ctx context.Context, tbs Tablespace) error { +func Update(ctx context.Context, db *sql.DB, tbs Tablespace) error { contextLog := log.FromContext(ctx).WithName("tbs_reconciler_update") tablespaceLocation := specs.LocationForTablespace(tbs.Name) @@ -130,7 +116,7 @@ func (tbsMgr postgresTablespaceManager) Update(ctx context.Context, tbs Tablespa return fmt.Errorf("while updating tablespace %s: %w", tbs.Name, err) } var err error - if _, err = tbsMgr.superUserDB.ExecContext( + if _, err = db.ExecContext( ctx, fmt.Sprintf( "ALTER TABLESPACE %s OWNER TO %s", diff --git a/internal/management/controller/tablespaces/infrastructure/postgres_test.go b/internal/management/controller/tablespaces/infrastructure/postgres_test.go index 51299e6c57..9ca57cb017 100644 --- a/internal/management/controller/tablespaces/infrastructure/postgres_test.go +++ 
b/internal/management/controller/tablespaces/infrastructure/postgres_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package infrastructure @@ -30,36 +33,40 @@ var _ = Describe("Postgres tablespaces functions test", func() { SELECT pg_tablespace.spcname spcname, COALESCE(pg_roles.rolname, '') rolname - FROM pg_tablespace - LEFT JOIN pg_roles ON pg_tablespace.spcowner = pg_roles.oid + FROM pg_catalog.pg_tablespace + LEFT JOIN pg_catalog.pg_roles ON pg_tablespace.spcowner = pg_roles.oid WHERE spcname NOT LIKE $1 ` expectedCreateStmt := "CREATE TABLESPACE \"%s\" OWNER \"%s\" " + "LOCATION '/var/lib/postgresql/tablespaces/atablespace/data'" + + expectedUpdateStmt := "ALTER TABLESPACE \"%s\" OWNER TO \"%s\"" + It("should send the expected query to list tablespaces and parse the return", func(ctx SpecContext) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) + tbsName := "atablespace" + anotherTbsName := "anothertablespace" + ownerName := "postgres" - tbsManager := newPostgresTablespaceManager(db) rows := sqlmock.NewRows( []string{"spcname", "rolname"}). - AddRow("atablespace", "postgres"). - AddRow("anothertablespace", "postgres") + AddRow(tbsName, ownerName). + AddRow(anotherTbsName, ownerName) mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) - tbs, err := tbsManager.List(ctx) + tbs, err := List(ctx, db) Expect(err).ShouldNot(HaveOccurred()) Expect(tbs).To(HaveLen(2)) Expect(tbs).To(ConsistOf( - Tablespace{Name: "atablespace", Owner: "postgres"}, - Tablespace{Name: "anothertablespace", Owner: "postgres"})) + Tablespace{Name: tbsName, Owner: ownerName}, + Tablespace{Name: anotherTbsName, Owner: ownerName})) }) It("should detect error if the list query returns error", func(ctx SpecContext) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - tbsManager := newPostgresTablespaceManager(db) mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnError(fmt.Errorf("boom")) - tbs, err := tbsManager.List(ctx) + tbs, err := List(ctx, db) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("boom")) Expect(tbs).To(BeEmpty()) @@ -71,10 +78,9 @@ var _ = Describe("Postgres tablespaces functions test", func() { tbsName := "atablespace" ownerName := "postgres" stmt := fmt.Sprintf(expectedCreateStmt, tbsName, ownerName) - tbsManager := newPostgresTablespaceManager(db) mock.ExpectExec(stmt). 
WillReturnResult(sqlmock.NewResult(2, 1)) - err = tbsManager.Create(ctx, Tablespace{Name: tbsName, Owner: "postgres"}) + err = Create(ctx, db, Tablespace{Name: tbsName, Owner: ownerName}) Expect(err).ShouldNot(HaveOccurred()) Expect(mock.ExpectationsWereMet()).To(Succeed()) }) @@ -84,12 +90,23 @@ var _ = Describe("Postgres tablespaces functions test", func() { tbsName := "atablespace" ownerName := "postgres" stmt := fmt.Sprintf(expectedCreateStmt, tbsName, ownerName) - tbsManager := newPostgresTablespaceManager(db) mock.ExpectExec(stmt). WillReturnError(fmt.Errorf("boom")) - err = tbsManager.Create(ctx, Tablespace{Name: tbsName, Owner: "postgres"}) + err = Create(ctx, db, Tablespace{Name: tbsName, Owner: ownerName}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("boom")) Expect(mock.ExpectationsWereMet()).To(Succeed()) }) + It("should issue the expected command to update a tablespace", func(ctx SpecContext) { + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + tbsName := "atablespace" + ownerName := "postgres" + stmt := fmt.Sprintf(expectedUpdateStmt, tbsName, ownerName) + mock.ExpectExec(stmt). + WillReturnResult(sqlmock.NewResult(2, 1)) + err = Update(ctx, db, Tablespace{Name: tbsName, Owner: ownerName}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(mock.ExpectationsWereMet()).To(Succeed()) + }) }) diff --git a/internal/management/controller/tablespaces/infrastructure/suite_test.go b/internal/management/controller/tablespaces/infrastructure/suite_test.go index 25c2aee4fe..9a9b2091f4 100644 --- a/internal/management/controller/tablespaces/infrastructure/suite_test.go +++ b/internal/management/controller/tablespaces/infrastructure/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package infrastructure diff --git a/internal/management/controller/tablespaces/manager.go b/internal/management/controller/tablespaces/manager.go index 9e561b7b5a..027929c383 100644 --- a/internal/management/controller/tablespaces/manager.go +++ b/internal/management/controller/tablespaces/manager.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
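With the TablespaceManager interface removed, callers now hold a plain *sql.DB and invoke the package-level functions directly. A hedged usage sketch follows; the pgx driver registration and the connection string are illustrative assumptions, and Create only succeeds on a node where the tablespace mount path actually exists:

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v5/stdlib" // assumed driver, illustrative only

	// internal package: this would only compile inside the cloudnative-pg module
	"github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/tablespaces/infrastructure"
)

func main() {
	ctx := context.Background()
	db, err := sql.Open("pgx", "host=localhost user=postgres dbname=postgres")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// List every non-system tablespace (pg_default/pg_global are filtered out)
	tbs, err := infrastructure.List(ctx, db)
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tbs {
		log.Printf("tablespace %q owned by %q", t.Name, t.Owner)
	}

	// Create and re-own follow the same pattern: connection first, payload second
	if err := infrastructure.Create(ctx, db, infrastructure.Tablespace{
		Name: "foo", Owner: "app",
	}); err != nil {
		log.Fatal(err)
	}
	if err := infrastructure.Update(ctx, db, infrastructure.Tablespace{
		Name: "foo", Owner: "new_user",
	}); err != nil {
		log.Fatal(err)
	}
}
```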
+ +SPDX-License-Identifier: Apache-2.0 */ package tablespaces import ( "context" + "database/sql" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -27,18 +31,31 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" ) +// instanceInterface represents the behavior required for the reconciler for +// instance operations +type instanceInterface interface { + GetNamespaceName() string + GetClusterName() string + GetSuperUserDB() (*sql.DB, error) + IsPrimary() (bool, error) + IsReady() error + CanCheckReadiness() bool +} + // TablespaceReconciler is a Kubernetes controller that ensures Tablespaces // are created in Postgres type TablespaceReconciler struct { - instance *postgres.Instance - client client.Client + instance instanceInterface + storageManager tablespaceStorageManager + client client.Client } // NewTablespaceReconciler creates a new TablespaceReconciler func NewTablespaceReconciler(instance *postgres.Instance, client client.Client) *TablespaceReconciler { controller := &TablespaceReconciler{ - instance: instance, - client: client, + instance: instance, + client: client, + storageManager: instanceTablespaceStorageManager{}, } return controller } @@ -47,16 +64,17 @@ func NewTablespaceReconciler(instance *postgres.Instance, client client.Client) func (r *TablespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). + Named("instance-tablespaces"). Complete(r) } // GetCluster gets the managed cluster through the client func (r *TablespaceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) { var cluster apiv1.Cluster - err := r.GetClient().Get(ctx, + err := r.client.Get(ctx, types.NamespacedName{ - Namespace: r.instance.Namespace, - Name: r.instance.ClusterName, + Namespace: r.instance.GetNamespaceName(), + Name: r.instance.GetClusterName(), }, &cluster) if err != nil { @@ -65,13 +83,3 @@ func (r *TablespaceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, return &cluster, nil } - -// GetClient returns the dynamic client that is being used for a certain reconciler -func (r *TablespaceReconciler) GetClient() client.Client { - return r.client -} - -// Instance returns the PostgreSQL instance that this reconciler is working on -func (r *TablespaceReconciler) Instance() *postgres.Instance { - return r.instance -} diff --git a/internal/management/controller/tablespaces/reconciler.go b/internal/management/controller/tablespaces/reconciler.go index 2c1e70d79f..85b82d2339 100644 --- a/internal/management/controller/tablespaces/reconciler.go +++ b/internal/management/controller/tablespaces/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
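Since TablespaceReconciler now depends on the narrow instanceInterface instead of a concrete *postgres.Instance, tests can plug in a trivial double. A sketch of one possible fake; the type name and the canned return values are invented for illustration:

```go
// fakeInstance is a hypothetical test double satisfying instanceInterface.
type fakeInstance struct {
	db      *sql.DB
	primary bool
}

var _ instanceInterface = &fakeInstance{}

func (f *fakeInstance) GetNamespaceName() string          { return "default" }
func (f *fakeInstance) GetClusterName() string            { return "cluster-example" }
func (f *fakeInstance) GetSuperUserDB() (*sql.DB, error)  { return f.db, nil }
func (f *fakeInstance) IsPrimary() (bool, error)          { return f.primary, nil }
func (f *fakeInstance) IsReady() error                    { return nil }
func (f *fakeInstance) CanCheckReadiness() bool           { return true }
```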
+ +SPDX-License-Identifier: Apache-2.0 */ package tablespaces import ( "context" + "database/sql" "fmt" "time" @@ -28,7 +32,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/tablespaces/infrastructure" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/readiness" ) // Reconcile is the main reconciliation loop for the instance @@ -70,9 +73,10 @@ func (r *TablespaceReconciler) Reconcile( return reconcile.Result{}, nil } - checker := readiness.ForInstance(r.instance) - if checker.IsServerReady(ctx) != nil { - contextLogger.Debug("database not ready, skipping tablespace reconciling") + if err := r.instance.IsReady(); err != nil { + contextLogger.Debug( + "database not ready, skipping tablespace reconciling", + "err", err) return reconcile.Result{RequeueAfter: time.Second}, nil } @@ -96,9 +100,7 @@ func (r *TablespaceReconciler) reconcile( return nil, fmt.Errorf("while reconcile tablespaces: %w", err) } - tbsManager := infrastructure.NewPostgresTablespaceManager(superUserDB) - tbsStorageManager := instanceTablespaceStorageManager{} - tbsInDatabase, err := tbsManager.List(ctx) + tbsInDatabase, err := infrastructure.List(ctx, superUserDB) if err != nil { return nil, fmt.Errorf("could not fetch tablespaces from database: %w", err) } @@ -106,15 +108,14 @@ func (r *TablespaceReconciler) reconcile( steps := evaluateNextSteps(ctx, tbsInDatabase, cluster.Spec.Tablespaces) result := r.applySteps( ctx, - tbsManager, - tbsStorageManager, + superUserDB, steps, ) // update the cluster status updatedCluster := cluster.DeepCopy() updatedCluster.Status.TablespacesStatus = result - if err := r.GetClient().Status().Patch(ctx, updatedCluster, client.MergeFrom(cluster)); err != nil { + if err := r.client.Status().Patch(ctx, updatedCluster, client.MergeFrom(cluster)); err != nil { return nil, fmt.Errorf("while setting the tablespace reconciler status: %w", err) } @@ -132,14 +133,13 @@ func (r *TablespaceReconciler) reconcile( // if they arose when applying the steps func (r *TablespaceReconciler) applySteps( ctx context.Context, - tbsManager infrastructure.TablespaceManager, - tbsStorageManager tablespaceStorageManager, + db *sql.DB, actions []tablespaceReconcilerStep, ) []apiv1.TablespaceState { result := make([]apiv1.TablespaceState, len(actions)) for idx, step := range actions { - result[idx] = step.execute(ctx, tbsManager, tbsStorageManager) + result[idx] = step.execute(ctx, db, r.storageManager) } return result diff --git a/internal/management/controller/tablespaces/storage.go b/internal/management/controller/tablespaces/storage.go index c9984305aa..23feffffef 100644 --- a/internal/management/controller/tablespaces/storage.go +++ b/internal/management/controller/tablespaces/storage.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package tablespaces @@ -22,6 +25,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) +// tablespaceStorageManager represents the required behavior in terms of storage +// for the tablespace reconciler type tablespaceStorageManager interface { getStorageLocation(tbsName string) string storageExists(tbsName string) (bool, error) diff --git a/internal/management/controller/tablespaces/suite_test.go b/internal/management/controller/tablespaces/suite_test.go index 103b56df8e..eca0dd4266 100644 --- a/internal/management/controller/tablespaces/suite_test.go +++ b/internal/management/controller/tablespaces/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package tablespaces diff --git a/internal/management/controller/tablespaces/tablespaces.go b/internal/management/controller/tablespaces/tablespaces.go index 9d1699c117..bc7ca05763 100644 --- a/internal/management/controller/tablespaces/tablespaces.go +++ b/internal/management/controller/tablespaces/tablespaces.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package tablespaces diff --git a/internal/management/istio/doc.go b/internal/management/istio/doc.go index 23758bc70b..868d2fe783 100644 --- a/internal/management/istio/doc.go +++ b/internal/management/istio/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package istio implements functions needed to integrate with istio-proxy diff --git a/internal/management/istio/istio.go b/internal/management/istio/istio.go index 2bd4ae9228..1698497f4c 100644 --- a/internal/management/istio/istio.go +++ b/internal/management/istio/istio.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
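The tablespaceStorageManager interface documented above is also what the earlier specs stub out. Inferring from the call sites (unavailableStorageLocations entries such as "/foo"), the mock plausibly looks like the following; the real definition is elsewhere in the test files:

```go
// Sketch of the test double: a location counts as missing when listed.
type mockTablespaceStorageManager struct {
	unavailableStorageLocations []string
}

func (m mockTablespaceStorageManager) getStorageLocation(tbsName string) string {
	return fmt.Sprintf("/%s", tbsName)
}

func (m mockTablespaceStorageManager) storageExists(tbsName string) (bool, error) {
	return !slices.Contains(
		m.unavailableStorageLocations,
		m.getStorageLocation(tbsName),
	), nil
}
```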
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package istio diff --git a/internal/management/linkerd/doc.go b/internal/management/linkerd/doc.go index 386578fb51..58b114d430 100644 --- a/internal/management/linkerd/doc.go +++ b/internal/management/linkerd/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package linkerd implements functions needed to integrate with linkerd-proxy diff --git a/internal/management/linkerd/linkerd.go b/internal/management/linkerd/linkerd.go index d600dad137..66717c0038 100644 --- a/internal/management/linkerd/linkerd.go +++ b/internal/management/linkerd/linkerd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package linkerd diff --git a/internal/management/utils/secrets.go b/internal/management/utils/secrets.go index d60a19b1da..adeabc12c8 100644 --- a/internal/management/utils/secrets.go +++ b/internal/management/utils/secrets.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package utils contains uncategorized utilities only used diff --git a/internal/management/utils/secrets_test.go b/internal/management/utils/secrets_test.go index 6df8d4030f..8a2545a807 100644 --- a/internal/management/utils/secrets_test.go +++ b/internal/management/utils/secrets_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/internal/management/utils/suite_test.go b/internal/management/utils/suite_test.go index b9dffc58de..0eff704ab7 100644 --- a/internal/management/utils/suite_test.go +++ b/internal/management/utils/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/internal/pgbouncer/management/controller/instance.go b/internal/pgbouncer/management/controller/instance.go index 461552cc0d..d0c471acaa 100644 --- a/internal/pgbouncer/management/controller/instance.go +++ b/internal/pgbouncer/management/controller/instance.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/instance_test.go b/internal/pgbouncer/management/controller/instance_test.go index 86e0c3b379..0893587617 100644 --- a/internal/pgbouncer/management/controller/instance_test.go +++ b/internal/pgbouncer/management/controller/instance_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/manager.go b/internal/pgbouncer/management/controller/manager.go index 0c97a1f95c..c115343ed0 100644 --- a/internal/pgbouncer/management/controller/manager.go +++ b/internal/pgbouncer/management/controller/manager.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the functions in pgbouncer instance manager @@ -63,10 +66,12 @@ func NewPgBouncerReconciler(poolerNamespacedName types.NamespacedName) (*PgBounc // Run runs the reconciliation loop for this resource func (r *PgBouncerReconciler) Run(ctx context.Context) { + contextLogger := log.FromContext(ctx) + for { // Retry with exponential back-off, unless it is a connection refused error err := retry.OnError(retry.DefaultBackoff, func(err error) bool { - log.Error(err, "Error calling Watch") + contextLogger.Error(err, "Error calling Watch") return !utilnet.IsConnectionRefused(err) }, func() error { return r.watch(ctx) @@ -81,6 +86,8 @@ func (r *PgBouncerReconciler) Run(ctx context.Context) { // watch contains the main reconciler loop func (r *PgBouncerReconciler) watch(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + var err error r.poolerWatch, err = r.client.Watch(ctx, &apiv1.PoolerList{}, &ctrl.ListOptions{ @@ -98,7 +105,7 @@ func (r *PgBouncerReconciler) watch(ctx context.Context) error { return r.Reconcile(ctx, &receivedEvent) }) if err != nil { - log.Error(err, "Reconciliation error") + contextLogger.Error(err, "Reconciliation error") } } return nil @@ -118,7 +125,7 @@ func (r *PgBouncerReconciler) GetClient() ctrl.Client { // Reconcile is the main reconciliation loop for the pgbouncer instance func (r *PgBouncerReconciler) Reconcile(ctx context.Context, event *watch.Event) error { - contextLogger, _ := log.SetupLogger(ctx) + contextLogger := log.FromContext(ctx) contextLogger.Debug( "Reconciliation loop", "eventType", event.Type, @@ -205,7 +212,7 @@ func (r *PgBouncerReconciler) writePgBouncerConfig(ctx context.Context, pooler * return false, fmt.Errorf("while generating pgbouncer configuration: %w", err) } - return refreshConfigurationFiles(configFiles) + return refreshConfigurationFiles(ctx, configFiles) } // Init ensures that all PgBouncer requirement are met. @@ -214,6 +221,8 @@ func (r *PgBouncerReconciler) writePgBouncerConfig(ctx context.Context, pooler * // 1. create the pgbouncer configuration and the required secrets // 2. ensure that every needed folder is existent func (r *PgBouncerReconciler) Init(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + var pooler apiv1.Pooler // Get the pooler from the API Server @@ -228,7 +237,7 @@ func (r *PgBouncerReconciler) Init(ctx context.Context) error { // Ensure we have the directory to store the controlling socket if err := fileutils.EnsureDirectoryExists(config.PgBouncerSocketDir); err != nil { - log.Error(err, "while checking socket directory existed", "dir", config.PgBouncerSocketDir) + contextLogger.Error(err, "while checking socket directory existed", "dir", config.PgBouncerSocketDir) return err } diff --git a/internal/pgbouncer/management/controller/refresh.go b/internal/pgbouncer/management/controller/refresh.go index 2193c19f4a..9c4d49a512 100644 --- a/internal/pgbouncer/management/controller/refresh.go +++ b/internal/pgbouncer/management/controller/refresh.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
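The reworked Run loop keeps the pre-existing retry semantics: client-go retries the watch with exponential backoff for every error except a connection refused one. A condensed, self-contained sketch of that pattern, with watchOnce standing in for r.watch(ctx):

```go
package main

import (
	"context"
	"errors"
	"log"

	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/client-go/util/retry"
)

// watchOnce is a hypothetical stand-in for r.watch(ctx).
func watchOnce(_ context.Context) error {
	return errors.New("transient apiserver hiccup")
}

func main() {
	ctx := context.Background()
	err := retry.OnError(
		retry.DefaultBackoff,
		// Retriable unless the API server is plainly unreachable
		func(err error) bool { return !utilnet.IsConnectionRefused(err) },
		func() error { return watchOnce(ctx) },
	)
	if err != nil {
		log.Println("backoff exhausted or connection refused:", err)
	}
}
```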
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller import ( + "context" "fmt" "github.com/cloudnative-pg/machinery/pkg/fileutils" @@ -27,16 +31,18 @@ import ( // refreshConfigurationFiles writes the configuration files, returning a // flag indicating if something is changed or not and an error status -func refreshConfigurationFiles(files config.ConfigurationFiles) (bool, error) { +func refreshConfigurationFiles(ctx context.Context, files config.ConfigurationFiles) (bool, error) { var changed bool + contextLogger := log.FromContext(ctx) + for fileName, content := range files { changedFile, err := fileutils.WriteFileAtomic(fileName, content, 0o600) if err != nil { return false, fmt.Errorf("while recreating configs:%w", err) } if changedFile { - log.Info("updated configuration file", "name", fileName) + contextLogger.Info("updated configuration file", "name", fileName) changed = true } } diff --git a/internal/pgbouncer/management/controller/refresh_test.go b/internal/pgbouncer/management/controller/refresh_test.go index 5565c5357c..e504ec1eda 100644 --- a/internal/pgbouncer/management/controller/refresh_test.go +++ b/internal/pgbouncer/management/controller/refresh_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller @@ -45,8 +48,8 @@ var _ = Describe("RefreshConfigurationFiles", func() { }) Context("when no files are passed", func() { - It("should return false and no error", func() { - changed, err := refreshConfigurationFiles(files) + It("should return false and no error", func(ctx SpecContext) { + changed, err := refreshConfigurationFiles(ctx, files) Expect(changed).To(BeFalse()) Expect(err).NotTo(HaveOccurred()) }) @@ -58,8 +61,8 @@ var _ = Describe("RefreshConfigurationFiles", func() { files[filepath.Join(tmpDir, "config2")] = []byte("content2") }) - It("should write content to files and return true", func() { - changed, err := refreshConfigurationFiles(files) + It("should write content to files and return true", func(ctx SpecContext) { + changed, err := refreshConfigurationFiles(ctx, files) Expect(changed).To(BeTrue()) Expect(err).NotTo(HaveOccurred()) @@ -76,8 +79,8 @@ var _ = Describe("RefreshConfigurationFiles", func() { files["/proc/you-cannot-write-here.conf"] = []byte("content") }) - It("should return an error", func() { - _, err := refreshConfigurationFiles(files) + It("should return an error", func(ctx SpecContext) { + _, err := refreshConfigurationFiles(ctx, files) Expect(err).To(HaveOccurred()) }) }) diff --git a/internal/pgbouncer/management/controller/secrets.go b/internal/pgbouncer/management/controller/secrets.go index e781963617..a12dd6e392 100644 --- a/internal/pgbouncer/management/controller/secrets.go +++ b/internal/pgbouncer/management/controller/secrets.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/secrets_test.go b/internal/pgbouncer/management/controller/secrets_test.go index 8da0a4be70..6f8d210b3c 100644 --- a/internal/pgbouncer/management/controller/secrets_test.go +++ b/internal/pgbouncer/management/controller/secrets_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/suite_test.go b/internal/pgbouncer/management/controller/suite_test.go index c12d956a01..c2ec695961 100644 --- a/internal/pgbouncer/management/controller/suite_test.go +++ b/internal/pgbouncer/management/controller/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/plugin/resources/doc.go b/internal/plugin/resources/doc.go index a26a0e07fb..0053dc6a8b 100644 --- a/internal/plugin/resources/doc.go +++ b/internal/plugin/resources/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package resources contains reusable functions for the plugin commands diff --git a/internal/plugin/resources/instance.go b/internal/plugin/resources/instance.go index 1ba6a58d47..9fdab1378d 100644 --- a/internal/plugin/resources/instance.go +++ b/internal/plugin/resources/instance.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package resources @@ -33,9 +36,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -59,7 +62,7 @@ func GetInstancePods(ctx context.Context, clusterName string) ([]corev1.Pod, cor var managedPods []corev1.Pod var primaryPod corev1.Pod for idx := range pods.Items { - for _, owner := range pods.Items[idx].ObjectMeta.OwnerReferences { + for _, owner := range pods.Items[idx].OwnerReferences { if owner.Kind == apiv1.ClusterKind && owner.Name == clusterName { managedPods = append(managedPods, pods.Items[idx]) if specs.IsPodPrimary(pods.Items[idx]) { @@ -74,10 +77,14 @@ func GetInstancePods(ctx context.Context, clusterName string) ([]corev1.Pod, cor // ExtractInstancesStatus extracts the instance status from the given pod list func ExtractInstancesStatus( ctx context.Context, + cluster *apiv1.Cluster, config *rest.Config, filteredPods []corev1.Pod, ) (postgres.PostgresqlStatusList, []error) { - var result postgres.PostgresqlStatusList + result := postgres.PostgresqlStatusList{ + IsReplicaCluster: cluster.IsReplica(), + CurrentPrimary: cluster.Status.CurrentPrimary, + } var errs []error for idx := range filteredPods { @@ -103,7 +110,7 @@ func getInstanceStatusFromPod( CoreV1(). Pods(pod.Namespace). ProxyGet( - instance.GetStatusSchemeFromPod(&pod).ToString(), + remote.GetStatusSchemeFromPod(&pod).ToString(), pod.Name, strconv.Itoa(int(url.StatusPort)), url.PathPgStatus, @@ -112,7 +119,9 @@ func getInstanceStatusFromPod( DoRaw(ctx) if err != nil { result.AddPod(pod) - result.Error = err + result.Error = fmt.Errorf( + "failed to get status by proxying to the pod, you might lack permissions to get pods/proxy: %w", + err) return result } diff --git a/internal/scheme/doc.go b/internal/scheme/doc.go index 1f1e89c07e..b9a1289eec 100644 --- a/internal/scheme/doc.go +++ b/internal/scheme/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package scheme offers a builder capable of generating a scheme with the resources known by the CNP manager diff --git a/internal/scheme/scheme.go b/internal/scheme/scheme.go index a26858459f..adc11c16d0 100644 --- a/internal/scheme/scheme.go +++ b/internal/scheme/scheme.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
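ExtractInstancesStatus now needs the Cluster so the returned list is pre-seeded with the replica-cluster flag and the current primary name. A hedged sketch of an updated call site; cluster, restConfig, and the surrounding function are assumed context:

```go
// Illustrative fragment: GetInstancePods and ExtractInstancesStatus are the
// functions from this package; cluster and restConfig come from the caller.
pods, primaryPod, err := resources.GetInstancePods(ctx, cluster.Name)
if err != nil {
	return err
}
_ = primaryPod // the detected primary pod, if any

statusList, errs := resources.ExtractInstancesStatus(ctx, cluster, restConfig, pods)
for _, e := range errs {
	fmt.Fprintln(os.Stderr, "instance status error:", e)
}
fmt.Printf("replica cluster: %v, current primary: %q\n",
	statusList.IsReplicaCluster, statusList.CurrentPrimary)
```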
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
*/

package scheme
diff --git a/internal/tools/tools.go b/internal/tools/tools.go
index b868e6be3c..913a770727 100644
--- a/internal/tools/tools.go
+++ b/internal/tools/tools.go
@@ -2,7 +2,8 @@
// +build tools

/*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,6 +16,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
*/

// Package tools is used to track dependencies of tools we use in our
diff --git a/internal/webhook/v1/backup_webhook.go b/internal/webhook/v1/backup_webhook.go
new file mode 100644
index 0000000000..29fd04c7f7
--- /dev/null
+++ b/internal/webhook/v1/backup_webhook.go
@@ -0,0 +1,187 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package v1
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// backupLog is for logging in this package.
+var backupLog = log.WithName("backup-resource").WithValues("version", "v1")
+
+// SetupBackupWebhookWithManager registers the webhook for Backup in the manager.
+func SetupBackupWebhookWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Backup{}).
+		WithValidator(newBypassableValidator(&BackupCustomValidator{})).
+		WithDefaulter(&BackupCustomDefaulter{}).
+		Complete()
+}
+
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing the path to an invalid value can cause API server errors, as the API server will fail to locate the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-backup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,verbs=create;update,versions=v1,name=mbackup.cnpg.io,sideEffects=None
+
+// BackupCustomDefaulter struct is responsible for setting default values on the custom resource of the
+// Kind Backup when those are created or updated.
+type BackupCustomDefaulter struct{}
+
+var _ webhook.CustomDefaulter = &BackupCustomDefaulter{}
+
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Backup.
+func (d *BackupCustomDefaulter) Default(_ context.Context, obj runtime.Object) error {
+	backup, ok := obj.(*apiv1.Backup)
+	if !ok {
+		return fmt.Errorf("expected a Backup object but got %T", obj)
+	}
+	backupLog.Info("Defaulting for Backup", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+	// TODO(user): fill in your defaulting logic.
+
+	return nil
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing the path to an invalid value can cause API server errors, as the API server will fail to locate the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-backup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,versions=v1,name=vbackup.cnpg.io,sideEffects=None
+
+// BackupCustomValidator struct is responsible for validating the Backup resource
+// when it is created, updated, or deleted.
+type BackupCustomValidator struct{}
+
+var _ webhook.CustomValidator = &BackupCustomValidator{}
+
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Backup.
+func (v *BackupCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+	backup, ok := obj.(*apiv1.Backup)
+	if !ok {
+		return nil, fmt.Errorf("expected a Backup object but got %T", obj)
+	}
+	backupLog.Info("Validation for Backup upon creation", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+	allErrs := v.validate(backup)
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"},
+		backup.Name, allErrs)
+}
+
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Backup.
+func (v *BackupCustomValidator) ValidateUpdate(
+	_ context.Context,
+	_, newObj runtime.Object,
+) (admission.Warnings, error) {
+	backup, ok := newObj.(*apiv1.Backup)
+	if !ok {
+		return nil, fmt.Errorf("expected a Backup object for the newObj but got %T", newObj)
+	}
+	backupLog.Info("Validation for Backup upon update", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+	allErrs := v.validate(backup)
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"},
+		backup.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Backup.
+func (v *BackupCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+	backup, ok := obj.(*apiv1.Backup)
+	if !ok {
+		return nil, fmt.Errorf("expected a Backup object but got %T", obj)
+	}
+	backupLog.Info("Validation for Backup upon deletion", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+	// TODO(user): fill in your validation logic upon object deletion.
+
+	return nil, nil
+}
+
+func (v *BackupCustomValidator) validate(r *apiv1.Backup) field.ErrorList {
+	var result field.ErrorList
+
+	if r.Spec.Method == apiv1.BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() {
+		result = append(result, field.Invalid(
+			field.NewPath("spec", "method"),
+			r.Spec.Method,
+			"Cannot use volumeSnapshot backup method due to missing "+
+				"VolumeSnapshot CRD. If you installed the CRD after having "+
+				"started the operator, please restart it to enable "+
+				"VolumeSnapshot support",
+		))
+	}
+
+	if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.Online != nil {
+		result = append(result, field.Invalid(
+			field.NewPath("spec", "online"),
+			r.Spec.Online,
+			"Online parameter can be specified only if the backup method is volumeSnapshot",
+		))
+	}
+
+	if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil {
+		result = append(result, field.Invalid(
+			field.NewPath("spec", "onlineConfiguration"),
+			r.Spec.OnlineConfiguration,
+			"OnlineConfiguration parameter can be specified only if the backup method is volumeSnapshot",
+		))
+	}
+
+	if r.Spec.Method == apiv1.BackupMethodPlugin && r.Spec.PluginConfiguration.IsEmpty() {
+		result = append(result, field.Invalid(
+			field.NewPath("spec", "pluginConfiguration"),
+			r.Spec.PluginConfiguration,
+			"cannot be empty when the backup method is plugin",
+		))
+	}
+
+	if value := r.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]; value != "" {
+		_, err := strconv.Atoi(value)
+		if err != nil {
+			result = append(result, field.Invalid(
+				field.NewPath("metadata", "annotations", utils.BackupVolumeSnapshotDeadlineAnnotationName),
+				value,
+				"must be an integer",
+			))
+		}
+	}
+
+	return result
+}
diff --git a/internal/webhook/v1/backup_webhook_test.go b/internal/webhook/v1/backup_webhook_test.go
new file mode 100644
index 0000000000..2cd23679f6
--- /dev/null
+++ b/internal/webhook/v1/backup_webhook_test.go
@@ -0,0 +1,121 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
+	. "github.com/onsi/ginkgo/v2"
+	. 
"github.com/onsi/gomega" +) + +var _ = Describe("Backup webhook validate", func() { + var v *BackupCustomValidator + BeforeEach(func() { + v = &BackupCustomValidator{} + }) + + It("doesn't complain if VolumeSnapshot CRD is present", func() { + backup := &apiv1.Backup{ + Spec: apiv1.BackupSpec{ + Method: apiv1.BackupMethodVolumeSnapshot, + }, + } + utils.SetVolumeSnapshot(true) + result := v.validate(backup) + Expect(result).To(BeEmpty()) + }) + + It("complains if VolumeSnapshot CRD is not present", func() { + backup := &apiv1.Backup{ + Spec: apiv1.BackupSpec{ + Method: apiv1.BackupMethodVolumeSnapshot, + }, + } + utils.SetVolumeSnapshot(false) + result := v.validate(backup) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.method")) + }) + + It("complains if online is set on a barman backup", func() { + backup := &apiv1.Backup{ + Spec: apiv1.BackupSpec{ + Method: apiv1.BackupMethodBarmanObjectStore, + Online: ptr.To(true), + }, + } + result := v.validate(backup) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.online")) + }) + + It("complains if onlineConfiguration is set on a barman backup", func() { + backup := &apiv1.Backup{ + Spec: apiv1.BackupSpec{ + Method: apiv1.BackupMethodBarmanObjectStore, + OnlineConfiguration: &apiv1.OnlineConfiguration{}, + }, + } + result := v.validate(backup) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) + }) + + It("returns error if BackupVolumeSnapshotDeadlineAnnotationName is not an integer", func() { + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.BackupVolumeSnapshotDeadlineAnnotationName: "not-an-integer", + }, + }, + } + result := v.validate(backup) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("metadata.annotations." + utils.BackupVolumeSnapshotDeadlineAnnotationName)) + Expect(result[0].Error()).To(ContainSubstring("must be an integer")) + }) + + It("does not return error if BackupVolumeSnapshotDeadlineAnnotationName is an integer", func() { + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.BackupVolumeSnapshotDeadlineAnnotationName: "123", + }, + }, + } + result := v.validate(backup) + Expect(result).To(BeEmpty()) + }) + + It("does not return error if BackupVolumeSnapshotDeadlineAnnotationName is not set", func() { + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + } + result := v.validate(backup) + Expect(result).To(BeEmpty()) + }) +}) diff --git a/api/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go similarity index 63% rename from api/v1/cluster_webhook.go rename to internal/webhook/v1/cluster_webhook.go index 8b82c6b9e5..e82890f793 100644 --- a/api/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package v1 import ( + "context" "encoding/json" "fmt" "slices" @@ -24,10 +28,14 @@ import ( "strings" barmanWebhooks "github.com/cloudnative-pg/barman-cloud/pkg/api/webhooks" + "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/cloudnative-pg/machinery/pkg/types" + jsonpatch "github.com/evanphx/json-patch/v5" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" @@ -40,397 +48,218 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) -const ( - // DefaultMonitoringKey is the key that should be used in the default metrics configmap to store the queries - DefaultMonitoringKey = "queries" - // DefaultMonitoringConfigMapName is the name of the target configmap with the default monitoring queries, - // if configured - DefaultMonitoringConfigMapName = "cnpg-default-monitoring" - // DefaultMonitoringSecretName is the name of the target secret with the default monitoring queries, - // if configured - DefaultMonitoringSecretName = DefaultMonitoringConfigMapName - // DefaultApplicationDatabaseName is the name of application database if not specified - DefaultApplicationDatabaseName = "app" - // DefaultApplicationUserName is the name of application database owner if not specified - DefaultApplicationUserName = DefaultApplicationDatabaseName -) - const sharedBuffersParameter = "shared_buffers" // clusterLog is for logging in this package. var clusterLog = log.WithName("cluster-resource").WithValues("version", "v1") -// SetupWebhookWithManager setup the webhook inside the controller manager -func (r *Cluster) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). +// SetupClusterWebhookWithManager registers the webhook for Cluster in the manager. +func SetupClusterWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Cluster{}). + WithValidator(newBypassableValidator(&ClusterCustomValidator{})). + WithDefaulter(&ClusterCustomDefaulter{}). Complete() } +// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. +// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. 
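+// (For reference: controller-runtime derives this endpoint as
+// /mutate-<group>-<version>-<kind>, with dots in the group name replaced by
+// dashes, which is why the path in the marker below must not be hand-edited.)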
// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-cluster,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,verbs=create;update,versions=v1,name=mcluster.cnpg.io,sideEffects=None -var _ webhook.Defaulter = &Cluster{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type -func (r *Cluster) Default() { - clusterLog.Info("default", "name", r.Name, "namespace", r.Namespace) - - r.setDefaults(true) -} - -// SetDefaults apply the defaults to undefined values in a Cluster -func (r *Cluster) SetDefaults() { - r.setDefaults(false) -} - -func (r *Cluster) setDefaults(preserveUserSettings bool) { - // Defaulting the image name if not specified - if r.Spec.ImageName == "" && r.Spec.ImageCatalogRef == nil { - r.Spec.ImageName = configuration.Current.PostgresImageName - } - - // Defaulting the bootstrap method if not specified - if r.Spec.Bootstrap == nil { - r.Spec.Bootstrap = &BootstrapConfiguration{} - } - - // Defaulting initDB if no other boostrap method was passed - switch { - case r.Spec.Bootstrap.Recovery != nil: - r.defaultRecovery() - case r.Spec.Bootstrap.PgBaseBackup != nil: - r.defaultPgBaseBackup() - default: - r.defaultInitDB() - } +// ClusterCustomDefaulter struct is responsible for setting default values on the custom resource of the +// Kind Cluster when those are created or updated. +type ClusterCustomDefaulter struct{} - // Defaulting the pod anti-affinity type if podAntiAffinity - if (r.Spec.Affinity.EnablePodAntiAffinity == nil || *r.Spec.Affinity.EnablePodAntiAffinity) && - r.Spec.Affinity.PodAntiAffinityType == "" { - r.Spec.Affinity.PodAntiAffinityType = PodAntiAffinityTypePreferred - } +var _ webhook.CustomDefaulter = &ClusterCustomDefaulter{} - if r.Spec.Backup != nil && r.Spec.Backup.Target == "" { - r.Spec.Backup.Target = DefaultBackupTarget +// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Cluster. 
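+// It delegates to the defaulting logic defined on the API type itself
+// (cluster.Default, called below), keeping this webhook a thin adapter
+// around apiv1.Cluster.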
+func (d *ClusterCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { + cluster, ok := obj.(*apiv1.Cluster) + if !ok { + return fmt.Errorf("expected a Cluster object but got %T", obj) } + clusterLog.Info("Defaulting for Cluster", "name", cluster.GetName(), "namespace", cluster.GetNamespace()) - psqlVersion, err := r.GetPostgresqlVersion() - if err == nil { - // The validation error will be already raised by the - // validateImageName function - info := postgres.ConfigurationInfo{ - Settings: postgres.CnpgConfigurationSettings, - MajorVersion: psqlVersion, - UserSettings: r.Spec.PostgresConfiguration.Parameters, - IsReplicaCluster: r.IsReplica(), - PreserveFixedSettingsFromUser: preserveUserSettings, - IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta), - IsAlterSystemEnabled: r.Spec.PostgresConfiguration.EnableAlterSystem, - } - sanitizedParameters := postgres.CreatePostgresqlConfiguration(info).GetConfigurationParameters() - r.Spec.PostgresConfiguration.Parameters = sanitizedParameters - } - - if r.Spec.LogLevel == "" { - r.Spec.LogLevel = log.InfoLevelString - } + cluster.Default() - // we inject the defaultMonitoringQueries if the MonitoringQueriesConfigmap parameter is not empty - // and defaultQueries not disabled on cluster crd - if !r.Spec.Monitoring.AreDefaultQueriesDisabled() { - r.defaultMonitoringQueries(configuration.Current) - } + return nil +} - // If the ReplicationSlots or HighAvailability stanzas are nil, we create them and enable slots - if r.Spec.ReplicationSlots == nil { - r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{} - } - if r.Spec.ReplicationSlots.HighAvailability == nil { - r.Spec.ReplicationSlots.HighAvailability = &ReplicationSlotsHAConfiguration{ - Enabled: ptr.To(true), - SlotPrefix: "_cnpg_", - } - } - if r.Spec.ReplicationSlots.SynchronizeReplicas == nil { - r.Spec.ReplicationSlots.SynchronizeReplicas = &SynchronizeReplicasConfiguration{ - Enabled: ptr.To(true), - } - } +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. +// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-cluster,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,versions=v1,name=vcluster.cnpg.io,sideEffects=None - if len(r.Spec.Tablespaces) > 0 { - r.defaultTablespaces() - } +// ClusterCustomValidator struct is responsible for validating the Cluster resource +// when it is created, updated, or deleted. +type ClusterCustomValidator struct{} - r.setDefaultPlugins(configuration.Current) -} +var _ webhook.CustomValidator = &ClusterCustomValidator{} -func (r *Cluster) setDefaultPlugins(config *configuration.Data) { - // Add the list of pre-defined plugins - foundPlugins := stringset.New() - for _, plugin := range r.Spec.Plugins { - foundPlugins.Put(plugin.Name) +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Cluster. 
+func (v *ClusterCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + cluster, ok := obj.(*apiv1.Cluster) + if !ok { + return nil, fmt.Errorf("expected a Cluster object but got %T", obj) } + clusterLog.Info("Validation for Cluster upon creation", "name", cluster.GetName(), "namespace", + cluster.GetNamespace()) - for _, pluginName := range config.GetIncludePlugins() { - if !foundPlugins.Has(pluginName) { - r.Spec.Plugins = append(r.Spec.Plugins, PluginConfiguration{ - Name: pluginName, - Enabled: ptr.To(true), - }) - } - } -} + allErrs := v.validate(cluster) + allWarnings := v.getAdmissionWarnings(cluster) -// defaultTablespaces adds the tablespace owner where the -// user didn't specify it -func (r *Cluster) defaultTablespaces() { - defaultOwner := r.GetApplicationDatabaseOwner() - if len(defaultOwner) == 0 { - defaultOwner = "postgres" + if len(allErrs) == 0 { + return allWarnings, nil } - for name, tablespaceConfiguration := range r.Spec.Tablespaces { - if len(tablespaceConfiguration.Owner.Name) == 0 { - tablespaceConfiguration.Owner.Name = defaultOwner - } - r.Spec.Tablespaces[name] = tablespaceConfiguration - } + return allWarnings, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"}, + cluster.Name, allErrs) } -// defaultMonitoringQueries adds the default monitoring queries configMap -// if not already present in CustomQueriesConfigMap -func (r *Cluster) defaultMonitoringQueries(config *configuration.Data) { - if r.Spec.Monitoring == nil { - r.Spec.Monitoring = &MonitoringConfiguration{} +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Cluster. +func (v *ClusterCustomValidator) ValidateUpdate( + _ context.Context, + oldObj, newObj runtime.Object, +) (admission.Warnings, error) { + cluster, ok := newObj.(*apiv1.Cluster) + if !ok { + return nil, fmt.Errorf("expected a Cluster object for the newObj but got %T", newObj) } - if config.MonitoringQueriesConfigmap != "" { - var defaultConfigMapQueriesAlreadyPresent bool - // We check if the default queries are already inserted in the monitoring configuration - for _, monitoringConfigMap := range r.Spec.Monitoring.CustomQueriesConfigMap { - if monitoringConfigMap.Name == DefaultMonitoringConfigMapName { - defaultConfigMapQueriesAlreadyPresent = true - break - } - } - - // If the default queries are already present there is no need to re-add them. - // Please note that in this case that the default configMap could overwrite user existing queries - // depending on the order. This is an accepted behavior because the user willingly defined the order of his array - if !defaultConfigMapQueriesAlreadyPresent { - r.Spec.Monitoring.CustomQueriesConfigMap = append([]ConfigMapKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: DefaultMonitoringKey, - }, - }, r.Spec.Monitoring.CustomQueriesConfigMap...) 
- } + oldCluster, ok := oldObj.(*apiv1.Cluster) + if !ok { + return nil, fmt.Errorf("expected a Cluster object for the oldObj but got %T", oldObj) } - if config.MonitoringQueriesSecret != "" { - var defaultSecretQueriesAlreadyPresent bool - // we check if the default queries are already inserted in the monitoring configuration - for _, monitoringSecret := range r.Spec.Monitoring.CustomQueriesSecret { - if monitoringSecret.Name == DefaultMonitoringSecretName { - defaultSecretQueriesAlreadyPresent = true - break - } - } + clusterLog.Info("Validation for Cluster upon update", "name", cluster.GetName(), "namespace", + cluster.GetNamespace()) - if !defaultSecretQueriesAlreadyPresent { - r.Spec.Monitoring.CustomQueriesSecret = append([]SecretKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, - Key: DefaultMonitoringKey, - }, - }, r.Spec.Monitoring.CustomQueriesSecret...) - } - } -} + // applying defaults before validating updates to set any new default + oldCluster.SetDefaults() -// defaultInitDB enriches the initDB with defaults if not all the required arguments were passed -func (r *Cluster) defaultInitDB() { - if r.Spec.Bootstrap.InitDB == nil { - r.Spec.Bootstrap.InitDB = &BootstrapInitDB{ - Database: DefaultApplicationDatabaseName, - Owner: DefaultApplicationUserName, - } - } + allErrs := append( + v.validate(cluster), + v.validateClusterChanges(cluster, oldCluster)..., + ) + allWarnings := v.getAdmissionWarnings(cluster) - if r.Spec.Bootstrap.InitDB.Database == "" { - r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName - } - if r.Spec.Bootstrap.InitDB.Owner == "" { - r.Spec.Bootstrap.InitDB.Owner = r.Spec.Bootstrap.InitDB.Database - } - if r.Spec.Bootstrap.InitDB.Encoding == "" { - r.Spec.Bootstrap.InitDB.Encoding = "UTF8" - } - if r.Spec.Bootstrap.InitDB.LocaleCollate == "" { - r.Spec.Bootstrap.InitDB.LocaleCollate = "C" - } - if r.Spec.Bootstrap.InitDB.LocaleCType == "" { - r.Spec.Bootstrap.InitDB.LocaleCType = "C" + if len(allErrs) == 0 { + return allWarnings, nil } -} -// defaultRecovery enriches the recovery with defaults if not all the required arguments were passed -func (r *Cluster) defaultRecovery() { - if r.Spec.Bootstrap.Recovery.Database == "" { - r.Spec.Bootstrap.Recovery.Database = DefaultApplicationDatabaseName - } - if r.Spec.Bootstrap.Recovery.Owner == "" { - r.Spec.Bootstrap.Recovery.Owner = r.Spec.Bootstrap.Recovery.Database - } + return allWarnings, apierrors.NewInvalid( + schema.GroupKind{Group: "cluster.cnpg.io", Kind: "Cluster"}, + cluster.Name, allErrs) } -// defaultPgBaseBackup enriches the pg_basebackup with defaults if not all the required arguments were passed -func (r *Cluster) defaultPgBaseBackup() { - if r.Spec.Bootstrap.PgBaseBackup.Database == "" { - r.Spec.Bootstrap.PgBaseBackup.Database = DefaultApplicationDatabaseName - } - if r.Spec.Bootstrap.PgBaseBackup.Owner == "" { - r.Spec.Bootstrap.PgBaseBackup.Owner = r.Spec.Bootstrap.PgBaseBackup.Database +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Cluster. 
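+// Since the kubebuilder marker above only lists the create and update verbs,
+// this method is currently never invoked; "delete" would have to be added to
+// the verbs (see the TODO) before the logic below takes effect.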
+func (v *ClusterCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + cluster, ok := obj.(*apiv1.Cluster) + if !ok { + return nil, fmt.Errorf("expected a Cluster object but got %T", obj) } -} + clusterLog.Info("Validation for Cluster upon deletion", "name", cluster.GetName(), "namespace", + cluster.GetNamespace()) -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-cluster,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,versions=v1,name=vcluster.cnpg.io,sideEffects=None - -var _ webhook.Validator = &Cluster{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *Cluster) ValidateCreate() (admission.Warnings, error) { - clusterLog.Info("validate create", "name", r.Name, "namespace", r.Namespace) - allErrs := r.Validate() - allWarnings := r.getAdmissionWarnings() - - if len(allErrs) == 0 { - return allWarnings, nil - } + // TODO(user): fill in your validation logic upon object deletion. - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"}, - r.Name, allErrs) + return nil, nil } -// Validate groups the validation logic for clusters returning a list of all encountered errors -func (r *Cluster) Validate() (allErrs field.ErrorList) { - type validationFunc func() field.ErrorList +// validateCluster groups the validation logic for clusters returning a list of all encountered errors +func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.ErrorList) { + type validationFunc func(*apiv1.Cluster) field.ErrorList validations := []validationFunc{ - r.validateInitDB, - r.validateRecoveryApplicationDatabase, - r.validatePgBaseBackupApplicationDatabase, - r.validateImport, - r.validateSuperuserSecret, - r.validateCerts, - r.validateBootstrapMethod, - r.validateImageName, - r.validateImagePullPolicy, - r.validateRecoveryTarget, - r.validatePrimaryUpdateStrategy, - r.validateMinSyncReplicas, - r.validateMaxSyncReplicas, - r.validateStorageSize, - r.validateWalStorageSize, - r.validateEphemeralVolumeSource, - r.validateTablespaceStorageSize, - r.validateName, - r.validateTablespaceNames, - r.validateBootstrapPgBaseBackupSource, - r.validateTablespaceBackupSnapshot, - r.validateBootstrapRecoverySource, - r.validateBootstrapRecoveryDataSource, - r.validateExternalClusters, - r.validateTolerations, - r.validateAntiAffinity, - r.validateReplicaMode, - r.validateBackupConfiguration, - r.validateRetentionPolicy, - r.validateConfiguration, - r.validateLDAP, - r.validateReplicationSlots, - r.validateEnv, - r.validateManagedServices, - r.validateManagedRoles, - r.validateManagedExtensions, - r.validateResources, - r.validateHibernationAnnotation, - r.validatePromotionToken, + v.validateInitDB, + v.validateRecoveryApplicationDatabase, + v.validatePgBaseBackupApplicationDatabase, + v.validateImport, + v.validateSuperuserSecret, + v.validateCerts, + v.validateBootstrapMethod, + v.validateImageName, + v.validateImagePullPolicy, + v.validateRecoveryTarget, + v.validatePrimaryUpdateStrategy, + v.validateMinSyncReplicas, + v.validateMaxSyncReplicas, + v.validateStorageSize, + v.validateWalStorageSize, + v.validateEphemeralVolumeSource, + v.validateTablespaceStorageSize, + v.validateName, + v.validateTablespaceNames, + 
v.validateBootstrapPgBaseBackupSource, + v.validateTablespaceBackupSnapshot, + v.validateBootstrapRecoverySource, + v.validateBootstrapRecoveryDataSource, + v.validateExternalClusters, + v.validateTolerations, + v.validateAntiAffinity, + v.validateReplicaMode, + v.validateBackupConfiguration, + v.validateRetentionPolicy, + v.validateConfiguration, + v.validateSynchronousReplicaConfiguration, + v.validateFailoverQuorum, + v.validateLDAP, + v.validateReplicationSlots, + v.validateSynchronizeLogicalDecoding, + v.validateEnv, + v.validateManagedServices, + v.validateManagedRoles, + v.validateManagedExtensions, + v.validateResources, + v.validateHibernationAnnotation, + v.validatePodPatchAnnotation, + v.validatePromotionToken, + v.validatePluginConfiguration, + v.validateLivenessPingerProbe, + v.validateExtensions, } for _, validate := range validations { - allErrs = append(allErrs, validate()...) + allErrs = append(allErrs, validate(r)...) } return allErrs } -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *Cluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { - clusterLog.Info("validate update", "name", r.Name, "namespace", r.Namespace) - oldCluster := old.(*Cluster) - - // applying defaults before validating updates to set any new default - oldCluster.SetDefaults() - - allErrs := append( - r.Validate(), - r.ValidateChanges(oldCluster)..., - ) - - if len(allErrs) == 0 { - return r.getAdmissionWarnings(), nil - } - - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "cluster.cnpg.io", Kind: "Cluster"}, - r.Name, allErrs) -} - -// ValidateChanges groups the validation logic for cluster changes checking the differences between +// validateClusterChanges groups the validation logic for cluster changes checking the differences between // the previous version and the new one of the cluster, returning a list of all encountered errors -func (r *Cluster) ValidateChanges(old *Cluster) (allErrs field.ErrorList) { +func (v *ClusterCustomValidator) validateClusterChanges(r, old *apiv1.Cluster) (allErrs field.ErrorList) { if old == nil { clusterLog.Info("Received invalid old object, skipping old object validation", "old", old) return nil } - type validationFunc func(old *Cluster) field.ErrorList + type validationFunc func(*apiv1.Cluster, *apiv1.Cluster) field.ErrorList validations := []validationFunc{ - r.validateImageChange, - r.validateConfigurationChange, - r.validateStorageChange, - r.validateWalStorageChange, - r.validateTablespacesChange, - r.validateUnixPermissionIdentifierChange, - r.validateReplicationSlotsChange, - r.validateWALLevelChange, - r.validateReplicaClusterChange, + v.validateImageChange, + v.validateConfigurationChange, + v.validateStorageChange, + v.validateWalStorageChange, + v.validateTablespacesChange, + v.validateUnixPermissionIdentifierChange, + v.validateReplicationSlotsChange, + v.validateWALLevelChange, + v.validateReplicaClusterChange, } for _, validate := range validations { - allErrs = append(allErrs, validate(old)...) + allErrs = append(allErrs, validate(r, old)...) } return allErrs } -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *Cluster) ValidateDelete() (admission.Warnings, error) { - clusterLog.Info("validate delete", "name", r.Name) - - // TODO(user): fill in your validation logic upon object deletion. 
- return nil, nil -} - // validateLDAP validates the ldap postgres configuration -func (r *Cluster) validateLDAP() field.ErrorList { +func (v *ClusterCustomValidator) validateLDAP(r *apiv1.Cluster) field.ErrorList { // No validating if not specified if r.Spec.PostgresConfiguration.LDAP == nil { return nil @@ -457,7 +286,7 @@ func (r *Cluster) validateLDAP() field.ErrorList { } // validateEnv validate the environment variables settings proposed by the user -func (r *Cluster) validateEnv() field.ErrorList { +func (v *ClusterCustomValidator) validateEnv(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList for i := range r.Spec.Env { @@ -480,6 +309,9 @@ func isReservedEnvironmentVariable(name string) bool { name = strings.ToUpper(name) switch { + case strings.HasPrefix(name, "CNPG_"): + return true + case strings.HasPrefix(name, "PG"): return true @@ -498,7 +330,7 @@ func isReservedEnvironmentVariable(name string) bool { // validateInitDB validate the bootstrapping options when initdb // method is used -func (r *Cluster) validateInitDB() field.ErrorList { +func (v *ClusterCustomValidator) validateInitDB(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If it's not configured, everything is ok @@ -513,7 +345,7 @@ func (r *Cluster) validateInitDB() field.ErrorList { // If you specify the database name, then you need also to specify the // owner user and vice-versa initDBOptions := r.Spec.Bootstrap.InitDB - result = r.validateApplicationDatabase(initDBOptions.Database, initDBOptions.Owner, + result = v.validateApplicationDatabase(initDBOptions.Database, initDBOptions.Owner, "initdb") if initDBOptions.WalSegmentSize != 0 && !utils.IsPowerOfTwo(initDBOptions.WalSegmentSize) { @@ -552,7 +384,7 @@ func (r *Cluster) validateInitDB() field.ErrorList { return result } -func (r *Cluster) validateImport() field.ErrorList { +func (v *ClusterCustomValidator) validateImport(r *apiv1.Cluster) field.ErrorList { // If it's not configured, everything is ok if r.Spec.Bootstrap == nil { return nil @@ -568,10 +400,10 @@ func (r *Cluster) validateImport() field.ErrorList { } switch importSpec.Type { - case MicroserviceSnapshotType: - return importSpec.validateMicroservice() - case MonolithSnapshotType: - return importSpec.validateMonolith() + case apiv1.MicroserviceSnapshotType: + return v.validateMicroservice(importSpec) + case apiv1.MonolithSnapshotType: + return v.validateMonolith(importSpec) default: return field.ErrorList{ field.Invalid( @@ -582,7 +414,7 @@ func (r *Cluster) validateImport() field.ErrorList { } } -func (s Import) validateMicroservice() field.ErrorList { +func (v *ClusterCustomValidator) validateMicroservice(s *apiv1.Import) field.ErrorList { var result field.ErrorList if len(s.Databases) != 1 { @@ -618,7 +450,7 @@ func (s Import) validateMicroservice() field.ErrorList { return result } -func (s Import) validateMonolith() field.ErrorList { +func (v *ClusterCustomValidator) validateMonolith(s *apiv1.Import) field.ErrorList { var result field.ErrorList if len(s.Databases) < 1 { @@ -666,7 +498,7 @@ func (s Import) validateMonolith() field.ErrorList { // validateRecovery validate the bootstrapping options when Recovery // method is used -func (r *Cluster) validateRecoveryApplicationDatabase() field.ErrorList { +func (v *ClusterCustomValidator) validateRecoveryApplicationDatabase(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If it's not configured, everything is ok @@ -679,13 +511,12 @@ func (r *Cluster) validateRecoveryApplicationDatabase() 
field.ErrorList { } recoveryOptions := r.Spec.Bootstrap.Recovery - return r.validateApplicationDatabase(recoveryOptions.Database, recoveryOptions.Owner, - "recovery") + return v.validateApplicationDatabase(recoveryOptions.Database, recoveryOptions.Owner, "recovery") } // validatePgBaseBackup validate the bootstrapping options when pg_basebackup // method is used -func (r *Cluster) validatePgBaseBackupApplicationDatabase() field.ErrorList { +func (v *ClusterCustomValidator) validatePgBaseBackupApplicationDatabase(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If it's not configured, everything is ok @@ -698,19 +529,19 @@ func (r *Cluster) validatePgBaseBackupApplicationDatabase() field.ErrorList { } pgBaseBackupOptions := r.Spec.Bootstrap.PgBaseBackup - return r.validateApplicationDatabase(pgBaseBackupOptions.Database, pgBaseBackupOptions.Owner, + return v.validateApplicationDatabase(pgBaseBackupOptions.Database, pgBaseBackupOptions.Owner, "pg_basebackup") } // validateApplicationDatabase validate the configuration for application database -func (r *Cluster) validateApplicationDatabase( +func (v *ClusterCustomValidator) validateApplicationDatabase( database string, owner string, command string, ) field.ErrorList { var result field.ErrorList // If you specify the database name, then you need also to specify the - // owner user and vice-versa + // owner user and vice versa if database != "" && owner == "" { result = append( result, @@ -731,7 +562,7 @@ func (r *Cluster) validateApplicationDatabase( } // validateCerts validate all the provided certs -func (r *Cluster) validateCerts() field.ErrorList { +func (v *ClusterCustomValidator) validateCerts(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList certificates := r.Spec.Certificates @@ -775,7 +606,7 @@ func (r *Cluster) validateCerts() field.ErrorList { } // ValidateSuperuserSecret validate super user secret value -func (r *Cluster) validateSuperuserSecret() field.ErrorList { +func (v *ClusterCustomValidator) validateSuperuserSecret(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If empty, we're ok! 
@@ -798,7 +629,7 @@ func (r *Cluster) validateSuperuserSecret() field.ErrorList { // validateBootstrapMethod is used to ensure we have only one // bootstrap methods active -func (r *Cluster) validateBootstrapMethod() field.ErrorList { +func (v *ClusterCustomValidator) validateBootstrapMethod(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If it's not configured, everything is ok @@ -820,10 +651,9 @@ func (r *Cluster) validateBootstrapMethod() field.ErrorList { if bootstrapMethods > 1 { result = append( result, - field.Invalid( + field.Forbidden( field.NewPath("spec", "bootstrap"), - "", - "Too many bootstrap types specified")) + "Only one bootstrap method can be specified at a time")) } return result @@ -831,7 +661,7 @@ func (r *Cluster) validateBootstrapMethod() field.ErrorList { // validateBootstrapPgBaseBackupSource is used to ensure that the source // server is correctly defined -func (r *Cluster) validateBootstrapPgBaseBackupSource() field.ErrorList { +func (v *ClusterCustomValidator) validateBootstrapPgBaseBackupSource(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // This validation is only applicable for physical backup @@ -855,7 +685,7 @@ func (r *Cluster) validateBootstrapPgBaseBackupSource() field.ErrorList { // validateBootstrapRecoverySource is used to ensure that the source // server is correctly defined -func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList { +func (v *ClusterCustomValidator) validateBootstrapRecoverySource(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // This validation is only applicable for recovery based bootstrap @@ -863,7 +693,9 @@ func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList { return result } - _, found := r.ExternalCluster(r.Spec.Bootstrap.Recovery.Source) + externalCluster, found := r.ExternalCluster(r.Spec.Bootstrap.Recovery.Source) + + // Ensure the existence of the external cluster if !found { result = append( result, @@ -873,12 +705,24 @@ func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList { fmt.Sprintf("External cluster %v not found", r.Spec.Bootstrap.Recovery.Source))) } + // Ensure the external cluster definition has enough information + // to be used to recover a data directory + if externalCluster.BarmanObjectStore == nil && externalCluster.PluginConfiguration == nil { + result = append( + result, + field.Invalid( + field.NewPath("spec", "bootstrap", "recovery", "source"), + r.Spec.Bootstrap.Recovery.Source, + fmt.Sprintf("External cluster %v cannot be used for recovery: "+ + "both Barman and CNPG-i plugin configurations are missing", r.Spec.Bootstrap.Recovery.Source))) + } + return result } // validateBootstrapRecoveryDataSource is used to ensure that the data // source is correctly defined -func (r *Cluster) validateBootstrapRecoveryDataSource() field.ErrorList { +func (v *ClusterCustomValidator) validateBootstrapRecoveryDataSource(r *apiv1.Cluster) field.ErrorList { // This validation is only applicable for datasource-based recovery based bootstrap if r.Spec.Bootstrap == nil || r.Spec.Bootstrap.Recovery == nil || r.Spec.Bootstrap.Recovery.VolumeSnapshots == nil { return nil @@ -934,7 +778,7 @@ func (r *Cluster) validateBootstrapRecoveryDataSource() field.ErrorList { // validateVolumeSnapshotSource validates a source of a recovery snapshot. 
// The supported resources are VolumeSnapshots and PersistentVolumeClaim func validateVolumeSnapshotSource( - value v1.TypedLocalObjectReference, + value corev1.TypedLocalObjectReference, path *field.Path, ) field.ErrorList { apiGroup := "" @@ -944,7 +788,7 @@ func validateVolumeSnapshotSource( switch { case apiGroup == storagesnapshotv1.GroupName && value.Kind == "VolumeSnapshot": - case apiGroup == v1.GroupName && value.Kind == "PersistentVolumeClaim": + case apiGroup == corev1.GroupName && value.Kind == "PersistentVolumeClaim": default: return field.ErrorList{ field.Invalid(path, value, "Only VolumeSnapshots and PersistentVolumeClaims are supported"), @@ -956,7 +800,7 @@ func validateVolumeSnapshotSource( // validateImageName validates the image name ensuring we aren't // using the "latest" tag -func (r *Cluster) validateImageName() field.ErrorList { +func (v *ClusterCustomValidator) validateImageName(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.ImageName == "" { @@ -965,7 +809,7 @@ func (r *Cluster) validateImageName() field.ErrorList { } // We have to check if the image has a valid tag - tag := utils.GetImageTag(r.Spec.ImageName) + tag := reference.New(r.Spec.ImageName).Tag switch tag { case "latest": result = append( @@ -982,7 +826,7 @@ func (r *Cluster) validateImageName() field.ErrorList { r.Spec.ImageName, "Can't use just the image sha as we can't detect upgrades")) default: - _, err := postgres.GetPostgresVersionFromTag(tag) + _, err := version.FromTag(tag) if err != nil { result = append( result, @@ -997,11 +841,11 @@ func (r *Cluster) validateImageName() field.ErrorList { // validateImagePullPolicy validates the image pull policy, // ensuring it is one of "Always", "Never" or "IfNotPresent" when defined -func (r *Cluster) validateImagePullPolicy() field.ErrorList { +func (v *ClusterCustomValidator) validateImagePullPolicy(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList switch r.Spec.ImagePullPolicy { - case v1.PullAlways, v1.PullNever, v1.PullIfNotPresent, "": + case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent, "": return result default: return append( @@ -1010,52 +854,62 @@ func (r *Cluster) validateImagePullPolicy() field.ErrorList { field.NewPath("spec", "imagePullPolicy"), r.Spec.ImagePullPolicy, fmt.Sprintf("invalid imagePullPolicy, if defined must be one of '%s', '%s' or '%s'", - v1.PullAlways, v1.PullNever, v1.PullIfNotPresent))) + corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent))) } } -func (r *Cluster) validateResources() field.ErrorList { +func (v *ClusterCustomValidator) validateResources(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList - cpuRequest := r.Spec.Resources.Requests.Cpu() + cpuRequests := r.Spec.Resources.Requests.Cpu() cpuLimits := r.Spec.Resources.Limits.Cpu() - if !cpuRequest.IsZero() && !cpuLimits.IsZero() { - cpuRequestGtThanLimit := cpuRequest.Cmp(*cpuLimits) > 0 + if !cpuRequests.IsZero() && !cpuLimits.IsZero() { + cpuRequestGtThanLimit := cpuRequests.Cmp(*cpuLimits) > 0 if cpuRequestGtThanLimit { result = append(result, field.Invalid( field.NewPath("spec", "resources", "requests", "cpu"), - cpuRequest.String(), + cpuRequests.String(), "CPU request is greater than the limit", )) } } - memoryRequest := r.Spec.Resources.Requests.Memory() - rawSharedBuffer := r.Spec.PostgresConfiguration.Parameters[sharedBuffersParameter] - if !memoryRequest.IsZero() && rawSharedBuffer != "" { - if sharedBuffers, err := parsePostgresQuantityValue(rawSharedBuffer); err == nil { - if 
memoryRequest.Cmp(sharedBuffers) < 0 { - result = append(result, field.Invalid( - field.NewPath("spec", "resources", "requests", "memory"), - memoryRequest.String(), - "Memory request is lower than PostgreSQL `shared_buffers` value", - )) - } - } - } - + memoryRequests := r.Spec.Resources.Requests.Memory() memoryLimits := r.Spec.Resources.Limits.Memory() - if !memoryRequest.IsZero() && !memoryLimits.IsZero() { - memoryRequestGtThanLimit := memoryRequest.Cmp(*memoryLimits) > 0 + if !memoryRequests.IsZero() && !memoryLimits.IsZero() { + memoryRequestGtThanLimit := memoryRequests.Cmp(*memoryLimits) > 0 if memoryRequestGtThanLimit { result = append(result, field.Invalid( field.NewPath("spec", "resources", "requests", "memory"), - memoryRequest.String(), + memoryRequests.String(), "Memory request is greater than the limit", )) } } + hugePages, hugePagesErrors := validateHugePagesResources(r) + result = append(result, hugePagesErrors...) + if cpuRequests.IsZero() && cpuLimits.IsZero() && memoryRequests.IsZero() && memoryLimits.IsZero() && + len(hugePages) > 0 { + result = append(result, field.Forbidden( + field.NewPath("spec", "resources"), + "HugePages require cpu or memory", + )) + } + + rawSharedBuffer := r.Spec.PostgresConfiguration.Parameters[sharedBuffersParameter] + if rawSharedBuffer != "" { + if sharedBuffers, err := parsePostgresQuantityValue(rawSharedBuffer); err == nil { + if !hasEnoughMemoryForSharedBuffers(sharedBuffers, memoryRequests, hugePages) { + result = append(result, field.Invalid( + field.NewPath("spec", "resources", "requests"), + memoryRequests.String(), + "Memory request is lower than PostgreSQL `shared_buffers` value", + )) + } + } + } + ephemeralStorageRequest := r.Spec.Resources.Requests.StorageEphemeral() ephemeralStorageLimits := r.Spec.Resources.Limits.StorageEphemeral() if !ephemeralStorageRequest.IsZero() && !ephemeralStorageLimits.IsZero() { @@ -1072,8 +926,126 @@ func (r *Cluster) validateResources() field.ErrorList { return result } +func validateHugePagesResources(r *apiv1.Cluster) (map[corev1.ResourceName]resource.Quantity, field.ErrorList) { + var result field.ErrorList + hugepages := make(map[corev1.ResourceName]resource.Quantity) + for name, quantity := range r.Spec.Resources.Limits { + if strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix) { + hugepages[name] = quantity + } + } + for name, quantity := range r.Spec.Resources.Requests { + if strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix) { + if existingQuantity, exists := hugepages[name]; exists { + if existingQuantity.Cmp(quantity) != 0 { + result = append(result, field.Invalid( + field.NewPath("spec", "resources", "requests", string(name)), + quantity.String(), + "HugePages requests must equal the limits", + )) + } + continue + } + hugepages[name] = quantity + } + } + return hugepages, result +} + +func hasEnoughMemoryForSharedBuffers( + sharedBuffers resource.Quantity, + memoryRequest *resource.Quantity, + hugePages map[corev1.ResourceName]resource.Quantity, +) bool { + if memoryRequest.IsZero() || sharedBuffers.Cmp(*memoryRequest) <= 0 { + return true + } + + for _, quantity := range hugePages { + if sharedBuffers.Cmp(quantity) <= 0 { + return true + } + } + + return false +} + +func (v *ClusterCustomValidator) validateSynchronousReplicaConfiguration(r *apiv1.Cluster) field.ErrorList { + if r.Spec.PostgresConfiguration.Synchronous == nil { + return nil + } + + var result field.ErrorList + + cfg := r.Spec.PostgresConfiguration.Synchronous + if cfg.Number >= 
(r.Spec.Instances +
+		len(cfg.StandbyNamesPost) +
+		len(cfg.StandbyNamesPre)) {
+		err := field.Invalid(
+			field.NewPath("spec", "postgresql", "synchronous"),
+			cfg,
+			"Invalid synchronous configuration: the number of synchronous replicas must be less than the "+
+				"total number of instances and the provided standby names.",
+		)
+		result = append(result, err)
+	}
+
+	return result
+}
+
+func (v *ClusterCustomValidator) validateFailoverQuorum(r *apiv1.Cluster) field.ErrorList {
+	var result field.ErrorList
+
+	failoverQuorumActive, err := r.IsFailoverQuorumActive()
+	if err != nil {
+		err := field.Invalid(
+			field.NewPath("metadata", "annotations", utils.FailoverQuorumAnnotationName),
+			r.Annotations[utils.FailoverQuorumAnnotationName],
+			"Invalid failoverQuorum annotation value, expected boolean.",
+		)
+		result = append(result, err)
+		return result
+	}
+	if !failoverQuorumActive {
+		return nil
+	}
+
+	cfg := r.Spec.PostgresConfiguration.Synchronous
+	if cfg == nil {
+		err := field.Required(
+			field.NewPath("spec", "postgresql", "synchronous"),
+			"Invalid failoverQuorum configuration: synchronous replication configuration "+
+				"is required.",
+		)
+		result = append(result, err)
+		return result
+	}
+
+	if cfg.Number <= len(cfg.StandbyNamesPost)+len(cfg.StandbyNamesPre) {
+		err := field.Invalid(
+			field.NewPath("spec", "postgresql", "synchronous"),
+			cfg,
+			"Invalid failoverQuorum configuration: spec.postgresql.synchronous.number must be greater than "+
+				"the total number of names in spec.postgresql.synchronous.standbyNamesPre and "+
+				"spec.postgresql.synchronous.standbyNamesPost to allow automatic failover.",
+		)
+		result = append(result, err)
+	}
+
+	if r.Spec.Instances <= 2 {
+		err := field.Invalid(
+			field.NewPath("spec", "instances"),
+			r.Spec.Instances,
+			"failoverQuorum requires more than 2 instances.",
+		)
+		result = append(result, err)
+	}
+
+	return result
+}
+
 // validateConfiguration determines whether a PostgreSQL configuration is valid
-func (r *Cluster) validateConfiguration() field.ErrorList {
+func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.ErrorList {
 	var result field.ErrorList
 
 	// We cannot have both old-style synchronous replica configuration
@@ -1088,22 +1060,22 @@ func (r *Cluster) validateConfiguration() field.ErrorList {
 		"Can't have both legacy synchronous replica configuration and new one"))
 	}
 
-	pgVersion, err := r.GetPostgresqlVersion()
+	pgMajor, err := r.GetPostgresqlMajorVersion()
 	if err != nil {
 		// The validation error will be already raised by the
 		// validateImageName function
 		return result
 	}
 
-	if pgVersion < 110000 {
+	if pgMajor < 13 {
 		result = append(result, field.Invalid(
 			field.NewPath("spec", "imageName"),
 			r.Spec.ImageName,
-			"Unsupported PostgreSQL version. Versions 11 or newer are supported"))
+			"Unsupported PostgreSQL version. 
Versions 13 or newer are supported")) } info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - MajorVersion: pgVersion, + MajorVersion: pgMajor, UserSettings: r.Spec.PostgresConfiguration.Parameters, IsReplicaCluster: r.IsReplica(), IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta), @@ -1174,25 +1146,25 @@ func (r *Cluster) validateConfiguration() field.ErrorList { } } - walLogHintsValue, walLogHintsSet := r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints] - if walLogHintsSet { - walLogHintsActivated, err := postgres.ParsePostgresConfigBoolean(walLogHintsValue) - if err != nil { - result = append( - result, - field.Invalid( - field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints), - walLogHintsValue, - "invalid `wal_log_hints`. Must be a postgres boolean")) - } - if r.Spec.Instances > 1 && !walLogHintsActivated { - result = append( - result, - field.Invalid( - field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints), - r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints], - "`wal_log_hints` must be set to `on` when `instances` > 1")) - } + if _, fieldError := tryParseBooleanPostgresParameter(r, postgres.ParameterHotStandbyFeedback); fieldError != nil { + result = append(result, fieldError) + } + + if _, fieldError := tryParseBooleanPostgresParameter(r, postgres.ParameterSyncReplicationSlots); fieldError != nil { + result = append(result, fieldError) + } + + walLogHintsActivated, fieldError := tryParseBooleanPostgresParameter(r, postgres.ParameterWalLogHints) + if fieldError != nil { + result = append(result, fieldError) + } + if walLogHintsActivated != nil && !*walLogHintsActivated && r.Spec.Instances > 1 { + result = append( + result, + field.Invalid( + field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints), + r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints], + "`wal_log_hints` must be set to `on` when `instances` > 1")) } // verify the postgres setting min_wal_size < max_wal_size < volume size @@ -1208,9 +1180,27 @@ func (r *Cluster) validateConfiguration() field.ErrorList { return result } +// tryParseBooleanPostgresParameter attempts to parse a boolean PostgreSQL parameter +// from the cluster specification. If the parameter is not set, it returns nil. +func tryParseBooleanPostgresParameter(r *apiv1.Cluster, parameterName string) (*bool, *field.Error) { + stringValue, hasParameter := r.Spec.PostgresConfiguration.Parameters[parameterName] + if !hasParameter { + return nil, nil + } + + value, err := postgres.ParsePostgresConfigBoolean(stringValue) + if err != nil { + return nil, field.Invalid( + field.NewPath("spec", "postgresql", "parameters", parameterName), + stringValue, + fmt.Sprintf("invalid `%s` value. 
Must be a postgres boolean", parameterName)) + } + return &value, nil +} + // validateWalSizeConfiguration verifies that min_wal_size < max_wal_size < wal volume size func validateWalSizeConfiguration( - postgresConfig PostgresConfiguration, walVolumeSize *resource.Quantity, + postgresConfig apiv1.PostgresConfiguration, walVolumeSize *resource.Quantity, ) field.ErrorList { const ( minWalSizeKey = "min_wal_size" @@ -1326,7 +1316,7 @@ func parsePostgresQuantityValue(value string) (resource.Quantity, error) { // validateConfigurationChange determines whether a PostgreSQL configuration // change can be applied -func (r *Cluster) validateConfigurationChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateConfigurationChange(r, old *apiv1.Cluster) field.ErrorList { var result field.ErrorList if old.Spec.ImageName != r.Spec.ImageName { @@ -1348,7 +1338,7 @@ func (r *Cluster) validateConfigurationChange(old *Cluster) field.ErrorList { return result } -func validateSyncReplicaElectionConstraint(constraints SyncReplicaElectionConstraints) *field.Error { +func validateSyncReplicaElectionConstraint(constraints apiv1.SyncReplicaElectionConstraints) *field.Error { if !constraints.Enabled { return nil } @@ -1367,52 +1357,54 @@ func validateSyncReplicaElectionConstraint(constraints SyncReplicaElectionConstr // validateImageChange validate the change from a certain image name // to a new one. -func (r *Cluster) validateImageChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateImageChange(r, old *apiv1.Cluster) field.ErrorList { var result field.ErrorList - var newMajor, oldMajor int - var err error - var newImagePath *field.Path + var fieldPath *field.Path if r.Spec.ImageCatalogRef != nil { - newImagePath = field.NewPath("spec", "imageCatalogRef") + fieldPath = field.NewPath("spec", "imageCatalogRef", "major") } else { - newImagePath = field.NewPath("spec", "imageName") + fieldPath = field.NewPath("spec", "imageName") } - r.Status.Image = "" - newMajor, err = r.GetPostgresqlVersion() + newVersion, err := r.GetPostgresqlMajorVersion() if err != nil { // The validation error will be already raised by the // validateImageName function return result } - old.Status.Image = "" - oldMajor, err = old.GetPostgresqlVersion() - if err != nil { - // The validation error will be already raised by the - // validateImageName function + if old.Status.PGDataImageInfo == nil { return result } + oldVersion := old.Status.PGDataImageInfo.MajorVersion - status := postgres.IsUpgradePossible(oldMajor, newMajor) - - if !status { + if oldVersion > newVersion { result = append( result, field.Invalid( - newImagePath, - newMajor, - fmt.Sprintf("can't upgrade between majors %v and %v", - oldMajor, newMajor))) + fieldPath, + strconv.Itoa(newVersion), + fmt.Sprintf("can't downgrade from major %v to %v", oldVersion, newVersion))) } + // TODO: Upgrading to versions 14 and 15 would require carrying information around about the collation used. + // See https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=9637badd9. + // This is not implemented yet, and users should not upgrade to old versions anyway, so we are blocking it. 
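+	// For illustration, the combined effect of this check and the downgrade
+	// check above is:
+	//   17 -> 16: rejected (downgrade)
+	//   13 -> 15: rejected (upgrade target below 16)
+	//   13 -> 17: allowed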
+ if oldVersion < newVersion && newVersion < 16 { + result = append( + result, + field.Invalid( + fieldPath, + strconv.Itoa(newVersion), + "major upgrades are only supported to version 16 or higher")) + } return result } // Validate the recovery target to ensure that the mutual exclusivity // of options is respected and plus validating the format of targetTime // if specified -func (r *Cluster) validateRecoveryTarget() field.ErrorList { +func (v *ClusterCustomValidator) validateRecoveryTarget(r *apiv1.Cluster) field.ErrorList { if r.Spec.Bootstrap == nil || r.Spec.Bootstrap.Recovery == nil { return nil } @@ -1477,7 +1469,7 @@ func (r *Cluster) validateRecoveryTarget() field.ErrorList { return result } -func validateTargetExclusiveness(recoveryTarget *RecoveryTarget) field.ErrorList { +func validateTargetExclusiveness(recoveryTarget *apiv1.RecoveryTarget) field.ErrorList { targets := 0 if recoveryTarget.TargetImmediate != nil { targets++ @@ -1508,15 +1500,15 @@ func validateTargetExclusiveness(recoveryTarget *RecoveryTarget) field.ErrorList // Validate the update strategy related to the number of required // instances -func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList { +func (v *ClusterCustomValidator) validatePrimaryUpdateStrategy(r *apiv1.Cluster) field.ErrorList { if r.Spec.PrimaryUpdateStrategy == "" { return nil } var result field.ErrorList - if r.Spec.PrimaryUpdateStrategy != PrimaryUpdateStrategySupervised && - r.Spec.PrimaryUpdateStrategy != PrimaryUpdateStrategyUnsupervised { + if r.Spec.PrimaryUpdateStrategy != apiv1.PrimaryUpdateStrategySupervised && + r.Spec.PrimaryUpdateStrategy != apiv1.PrimaryUpdateStrategyUnsupervised { result = append(result, field.Invalid( field.NewPath("spec", "primaryUpdateStrategy"), r.Spec.PrimaryUpdateStrategy, @@ -1524,7 +1516,7 @@ func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList { return result } - if r.Spec.PrimaryUpdateStrategy == PrimaryUpdateStrategySupervised && r.Spec.Instances == 1 { + if r.Spec.PrimaryUpdateStrategy == apiv1.PrimaryUpdateStrategySupervised && r.Spec.Instances == 1 { result = append(result, field.Invalid( field.NewPath("spec", "primaryUpdateStrategy"), r.Spec.PrimaryUpdateStrategy, @@ -1537,7 +1529,7 @@ func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList { // Validate the maximum number of synchronous instances // that should be kept in sync with the primary server -func (r *Cluster) validateMaxSyncReplicas() field.ErrorList { +func (v *ClusterCustomValidator) validateMaxSyncReplicas(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.MaxSyncReplicas < 0 { @@ -1558,7 +1550,7 @@ func (r *Cluster) validateMaxSyncReplicas() field.ErrorList { } // Validate the minimum number of synchronous instances -func (r *Cluster) validateMinSyncReplicas() field.ErrorList { +func (v *ClusterCustomValidator) validateMinSyncReplicas(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.MinSyncReplicas < 0 { @@ -1578,11 +1570,11 @@ func (r *Cluster) validateMinSyncReplicas() field.ErrorList { return result } -func (r *Cluster) validateStorageSize() field.ErrorList { +func (v *ClusterCustomValidator) validateStorageSize(r *apiv1.Cluster) field.ErrorList { return validateStorageConfigurationSize(*field.NewPath("spec", "storage"), r.Spec.StorageConfiguration) } -func (r *Cluster) validateWalStorageSize() field.ErrorList { +func (v *ClusterCustomValidator) validateWalStorageSize(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if 
r.ShouldCreateWalArchiveVolume() { @@ -1593,7 +1585,7 @@ func (r *Cluster) validateWalStorageSize() field.ErrorList { return result } -func (r *Cluster) validateEphemeralVolumeSource() field.ErrorList { +func (v *ClusterCustomValidator) validateEphemeralVolumeSource(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.EphemeralVolumeSource != nil && (r.Spec.EphemeralVolumesSizeLimit != nil && @@ -1608,7 +1600,7 @@ func (r *Cluster) validateEphemeralVolumeSource() field.ErrorList { return result } -func (r *Cluster) validateTablespaceStorageSize() field.ErrorList { +func (v *ClusterCustomValidator) validateTablespaceStorageSize(r *apiv1.Cluster) field.ErrorList { if r.Spec.Tablespaces == nil { return nil } @@ -1627,7 +1619,7 @@ func (r *Cluster) validateTablespaceStorageSize() field.ErrorList { func validateStorageConfigurationSize( structPath field.Path, - storageConfiguration StorageConfiguration, + storageConfiguration apiv1.StorageConfiguration, ) field.ErrorList { var result field.ErrorList @@ -1653,7 +1645,7 @@ func validateStorageConfigurationSize( } // Validate a change in the storage -func (r *Cluster) validateStorageChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateStorageChange(r, old *apiv1.Cluster) field.ErrorList { return validateStorageConfigurationChange( field.NewPath("spec", "storage"), old.Spec.StorageConfiguration, @@ -1661,7 +1653,7 @@ func (r *Cluster) validateStorageChange(old *Cluster) field.ErrorList { ) } -func (r *Cluster) validateWalStorageChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateWalStorageChange(r, old *apiv1.Cluster) field.ErrorList { if old.Spec.WalStorage == nil { return nil } @@ -1671,7 +1663,7 @@ func (r *Cluster) validateWalStorageChange(old *Cluster) field.ErrorList { field.Invalid( field.NewPath("spec", "walStorage"), r.Spec.WalStorage, - "walStorage cannot be disabled once the cluster is created"), + "walStorage cannot be disabled once configured"), } } @@ -1684,7 +1676,7 @@ func (r *Cluster) validateWalStorageChange(old *Cluster) field.ErrorList { // validateTablespacesChange checks that no tablespaces have been deleted, and that // no tablespaces have an invalid storage update -func (r *Cluster) validateTablespacesChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateTablespacesChange(r, old *apiv1.Cluster) field.ErrorList { if old.Spec.Tablespaces == nil { return nil } @@ -1721,8 +1713,8 @@ func (r *Cluster) validateTablespacesChange(old *Cluster) field.ErrorList { // validateStorageConfigurationChange generates an error list by comparing two StorageConfiguration func validateStorageConfigurationChange( structPath *field.Path, - oldStorage StorageConfiguration, - newStorage StorageConfiguration, + oldStorage apiv1.StorageConfiguration, + newStorage apiv1.StorageConfiguration, ) field.ErrorList { oldSize := oldStorage.GetSizeOrNil() if oldSize == nil { @@ -1752,7 +1744,7 @@ func validateStorageConfigurationChange( // Validate the cluster name. 
This is important to avoid issues // while generating services, which don't support having dots in // their name -func (r *Cluster) validateName() field.ErrorList { +func (v *ClusterCustomValidator) validateName(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if errs := validationutil.IsDNS1035Label(r.Name); len(errs) > 0 { @@ -1772,7 +1764,7 @@ func (r *Cluster) validateName() field.ErrorList { return result } -func (r *Cluster) validateTablespaceNames() field.ErrorList { +func (v *ClusterCustomValidator) validateTablespaceNames(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.Tablespaces == nil { return nil @@ -1803,7 +1795,7 @@ func (r *Cluster) validateTablespaceNames() field.ErrorList { return result } -func (r *Cluster) validateTablespaceBackupSnapshot() field.ErrorList { +func (v *ClusterCustomValidator) validateTablespaceBackupSnapshot(r *apiv1.Cluster) field.ErrorList { if r.Spec.Backup == nil || r.Spec.Backup.VolumeSnapshot == nil || len(r.Spec.Backup.VolumeSnapshot.TablespaceClassName) == 0 { return nil @@ -1825,7 +1817,7 @@ func (r *Cluster) validateTablespaceBackupSnapshot() field.ErrorList { } // Check if the external clusters list contains two servers with the same name -func (r *Cluster) validateExternalClusters() field.ErrorList { +func (v *ClusterCustomValidator) validateExternalClusters(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList stringSet := stringset.New() @@ -1834,7 +1826,7 @@ func (r *Cluster) validateExternalClusters() field.ErrorList { stringSet.Put(externalCluster.Name) result = append( result, - r.validateExternalCluster(&r.Spec.ExternalClusters[idx], path)...) + v.validateExternalCluster(&r.Spec.ExternalClusters[idx], path)...) } if stringSet.Len() != len(r.Spec.ExternalClusters) { @@ -1848,21 +1840,26 @@ func (r *Cluster) validateExternalClusters() field.ErrorList { } // validateExternalCluster check the validity of a certain ExternalCluster -func (r *Cluster) validateExternalCluster(externalCluster *ExternalCluster, path *field.Path) field.ErrorList { +func (v *ClusterCustomValidator) validateExternalCluster( + externalCluster *apiv1.ExternalCluster, + path *field.Path, +) field.ErrorList { var result field.ErrorList - if externalCluster.ConnectionParameters == nil && externalCluster.BarmanObjectStore == nil { + if externalCluster.ConnectionParameters == nil && + externalCluster.BarmanObjectStore == nil && + externalCluster.PluginConfiguration == nil { result = append(result, field.Invalid( path, externalCluster, - "one of connectionParameters and barmanObjectStore is required")) + "one of connectionParameters, plugin and barmanObjectStore is required")) } return result } -func (r *Cluster) validateReplicaClusterChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateReplicaClusterChange(r, old *apiv1.Cluster) field.ErrorList { // If the replication role didn't change then everything // is fine if r.IsReplica() == old.IsReplica() { @@ -1883,7 +1880,7 @@ func (r *Cluster) validateReplicaClusterChange(old *Cluster) field.ErrorList { return nil } -func (r *Cluster) validateUnixPermissionIdentifierChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateUnixPermissionIdentifierChange(r, old *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.PostgresGID != old.Spec.PostgresGID { @@ -1903,7 +1900,7 @@ func (r *Cluster) validateUnixPermissionIdentifierChange(old *Cluster) field.Err return result } -func (r *Cluster) validatePromotionToken() 
field.ErrorList { +func (v *ClusterCustomValidator) validatePromotionToken(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.ReplicaCluster == nil { @@ -1959,7 +1956,7 @@ func (r *Cluster) validatePromotionToken() field.ErrorList { // Check if the replica mode is used with an incompatible bootstrap // method -func (r *Cluster) validateReplicaMode() field.ErrorList { +func (v *ClusterCustomValidator) validateReplicaMode(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList replicaClusterConf := r.Spec.ReplicaCluster @@ -1988,7 +1985,7 @@ func (r *Cluster) validateReplicaMode() field.ErrorList { } else if r.Spec.Bootstrap.PgBaseBackup == nil && r.Spec.Bootstrap.Recovery == nil && // this is needed because we only want to validate this during cluster creation, currently if we would have // to enable this logic only during creation and not cluster changes it would require a meaningful refactor - len(r.ObjectMeta.ResourceVersion) == 0 { + len(r.ResourceVersion) == 0 { result = append(result, field.Invalid( field.NewPath("spec", "replicaCluster"), replicaClusterConf, @@ -1996,12 +1993,12 @@ func (r *Cluster) validateReplicaMode() field.ErrorList { } } - result = append(result, r.validateReplicaClusterExternalClusters()...) + result = append(result, v.validateReplicaClusterExternalClusters(r)...) return result } -func (r *Cluster) validateReplicaClusterExternalClusters() field.ErrorList { +func (v *ClusterCustomValidator) validateReplicaClusterExternalClusters(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList replicaClusterConf := r.Spec.ReplicaCluster if replicaClusterConf == nil { @@ -2048,7 +2045,7 @@ func (r *Cluster) validateReplicaClusterExternalClusters() field.ErrorList { // validateTolerations check and validate the tolerations field // This code is almost a verbatim copy of // https://github.com/kubernetes/kubernetes/blob/4d38d21/pkg/apis/core/validation/validation.go#L3147 -func (r *Cluster) validateTolerations() field.ErrorList { +func (v *ClusterCustomValidator) validateTolerations(r *apiv1.Cluster) field.ErrorList { path := field.NewPath("spec", "affinity", "toleration") allErrors := field.ErrorList{} for i, toleration := range r.Spec.Affinity.Tolerations { @@ -2059,14 +2056,14 @@ func (r *Cluster) validateTolerations() field.ErrorList { } // empty toleration key with Exists operator and empty value means match all taints - if len(toleration.Key) == 0 && toleration.Operator != v1.TolerationOpExists { + if len(toleration.Key) == 0 && toleration.Operator != corev1.TolerationOpExists { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator, "operator must be Exists when `key` is empty, which means \"match all values and all keys\"")) } - if toleration.TolerationSeconds != nil && toleration.Effect != v1.TaintEffectNoExecute { + if toleration.TolerationSeconds != nil && toleration.Effect != corev1.TaintEffectNoExecute { allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect, @@ -2076,20 +2073,20 @@ func (r *Cluster) validateTolerations() field.ErrorList { // validate toleration operator and value switch toleration.Operator { // empty operator means Equal - case v1.TolerationOpEqual, "": + case corev1.TolerationOpEqual, "": if errs := validationutil.IsValidLabelValue(toleration.Value); len(errs) != 0 { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) } - case v1.TolerationOpExists: + case 
corev1.TolerationOpExists: if len(toleration.Value) > 0 { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) } default: - validValues := []string{string(v1.TolerationOpEqual), string(v1.TolerationOpExists)} + validValues := []string{string(corev1.TolerationOpEqual), string(corev1.TolerationOpExists)} allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) @@ -2104,9 +2101,9 @@ func (r *Cluster) validateTolerations() field.ErrorList { return allErrors } -// validateTaintEffect is used from validateTollerations and is a verbatim copy of the code +// validateTaintEffect is used from validateTolerations and is a verbatim copy of the code // at https://github.com/kubernetes/kubernetes/blob/4d38d21/pkg/apis/core/validation/validation.go#L3087 -func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { +func validateTaintEffect(effect *corev1.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { if !allowEmpty && len(*effect) == 0 { return field.ErrorList{field.Required(fldPath, "")} } @@ -2114,14 +2111,14 @@ func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field allErrors := field.ErrorList{} switch *effect { // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit. - case v1.TaintEffectNoSchedule, v1.TaintEffectPreferNoSchedule, v1.TaintEffectNoExecute: + case corev1.TaintEffectNoSchedule, corev1.TaintEffectPreferNoSchedule, corev1.TaintEffectNoExecute: // case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, // core.TaintEffectNoExecute: default: validValues := []string{ - string(v1.TaintEffectNoSchedule), - string(v1.TaintEffectPreferNoSchedule), - string(v1.TaintEffectNoExecute), + string(corev1.TaintEffectNoSchedule), + string(corev1.TaintEffectPreferNoSchedule), + string(corev1.TaintEffectNoExecute), // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. // string(core.TaintEffectNoScheduleNoAdmit), } @@ -2131,25 +2128,25 @@ func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field } // validateAntiAffinity checks and validates the anti-affinity fields.
-func (r *Cluster) validateAntiAffinity() field.ErrorList { +func (v *ClusterCustomValidator) validateAntiAffinity(r *apiv1.Cluster) field.ErrorList { path := field.NewPath("spec", "affinity", "podAntiAffinityType") allErrors := field.ErrorList{} - if r.Spec.Affinity.PodAntiAffinityType != PodAntiAffinityTypePreferred && - r.Spec.Affinity.PodAntiAffinityType != PodAntiAffinityTypeRequired && + if r.Spec.Affinity.PodAntiAffinityType != apiv1.PodAntiAffinityTypePreferred && + r.Spec.Affinity.PodAntiAffinityType != apiv1.PodAntiAffinityTypeRequired && r.Spec.Affinity.PodAntiAffinityType != "" { allErrors = append(allErrors, field.Invalid( path, r.Spec.Affinity.PodAntiAffinityType, fmt.Sprintf("pod anti-affinity type must be '%s' (default if empty) or '%s'", - PodAntiAffinityTypePreferred, PodAntiAffinityTypeRequired), + apiv1.PodAntiAffinityTypePreferred, apiv1.PodAntiAffinityTypeRequired), )) } return allErrors } // validateBackupConfiguration validates the backup configuration -func (r *Cluster) validateBackupConfiguration() field.ErrorList { +func (v *ClusterCustomValidator) validateBackupConfiguration(r *apiv1.Cluster) field.ErrorList { if r.Spec.Backup == nil { return nil } @@ -2160,7 +2157,7 @@ func (r *Cluster) validateBackupConfiguration() field.ErrorList { } // validateRetentionPolicy validates the retention policy configuration -func (r *Cluster) validateRetentionPolicy() field.ErrorList { +func (v *ClusterCustomValidator) validateRetentionPolicy(r *apiv1.Cluster) field.ErrorList { if r.Spec.Backup == nil { return nil } @@ -2170,13 +2167,13 @@ func (r *Cluster) validateRetentionPolicy() field.ErrorList { ) } -func (r *Cluster) validateReplicationSlots() field.ErrorList { +func (v *ClusterCustomValidator) validateReplicationSlots(r *apiv1.Cluster) field.ErrorList { if r.Spec.ReplicationSlots == nil { - r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + r.Spec.ReplicationSlots = &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), }, - SynchronizeReplicas: &SynchronizeReplicasConfiguration{ + SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{ Enabled: ptr.To(true), }, } @@ -2187,46 +2184,75 @@ func (r *Cluster) validateReplicationSlots() field.ErrorList { return nil } - psqlVersion, err := r.GetPostgresqlVersion() - if err != nil { - // The validation error will be already raised by the - // validateImageName function + if err := r.Spec.ReplicationSlots.SynchronizeReplicas.ValidateRegex(); err != nil { + return field.ErrorList{ + field.Invalid( + field.NewPath("spec", "replicationSlots", "synchronizeReplicas", "excludePatterns"), + err, + "Cannot configure synchronizeReplicas. Invalid regexes were found"), + } + } + + return nil +} + +func (v *ClusterCustomValidator) validateSynchronizeLogicalDecoding(r *apiv1.Cluster) field.ErrorList { + replicationSlots := r.Spec.ReplicationSlots + if replicationSlots.HighAvailability == nil || !replicationSlots.HighAvailability.SynchronizeLogicalDecoding { return nil } - if psqlVersion < 110000 { - if replicationSlots.HighAvailability.GetEnabled() { - return field.ErrorList{ - field.Invalid( - field.NewPath("spec", "replicationSlots", "highAvailability", "enabled"), - replicationSlots.HighAvailability.GetEnabled(), - "Cannot enable HA replication slots synchronization. 
PostgreSQL 11 or above required"), - } - } + if postgres.IsManagedExtensionUsed("pg_failover_slots", r.Spec.PostgresConfiguration.Parameters) { + return nil + } - if replicationSlots.SynchronizeReplicas.GetEnabled() { - return field.ErrorList{ - field.Invalid( - field.NewPath("spec", "replicationSlots", "synchronizeReplicas", "enabled"), - replicationSlots.SynchronizeReplicas.GetEnabled(), - "Cannot enable user defined replication slots synchronization. PostgreSQL 11 or above required"), - } - } + pgMajor, err := r.GetPostgresqlMajorVersion() + if err != nil { + return nil } - if errs := r.Spec.ReplicationSlots.SynchronizeReplicas.compileRegex(); len(errs) > 0 { + if pgMajor < 17 { return field.ErrorList{ field.Invalid( - field.NewPath("spec", "replicationSlots", "synchronizeReplicas", "excludePatterns"), - errs, - "Cannot configure synchronizeReplicas. Invalid regexes were found"), + field.NewPath("spec", "replicationSlots", "highAvailability", "synchronizeLogicalDecoding"), + replicationSlots.HighAvailability.SynchronizeLogicalDecoding, + "pg_failover_slots extension must be enabled to use synchronizeLogicalDecoding with Postgres versions < 17", + ), } } - return nil + result := field.ErrorList{} + + hotStandbyFeedback, _ := postgres.ParsePostgresConfigBoolean( + r.Spec.PostgresConfiguration.Parameters[postgres.ParameterHotStandbyFeedback]) + if !hotStandbyFeedback { + result = append( + result, + field.Invalid( + field.NewPath("spec", "postgresql", "parameters", postgres.ParameterHotStandbyFeedback), + hotStandbyFeedback, + fmt.Sprintf("`%s` must be enabled to enable "+ + "`spec.replicationSlots.highAvailability.synchronizeLogicalDecoding`", + postgres.ParameterHotStandbyFeedback))) + } + + const syncReplicationSlotsKey = "sync_replication_slots" + syncReplicationSlots, _ := postgres.ParsePostgresConfigBoolean( + r.Spec.PostgresConfiguration.Parameters[syncReplicationSlotsKey]) + if !syncReplicationSlots { + result = append( + result, + field.Invalid( + field.NewPath("spec", "postgresql", "parameters", syncReplicationSlotsKey), + syncReplicationSlots, + fmt.Sprintf("either `%s` setting or pg_failover_slots extension must be enabled to enable "+ + "`spec.replicationSlots.highAvailability.synchronizeLogicalDecoding`", syncReplicationSlotsKey))) + } + + return result } -func (r *Cluster) validateReplicationSlotsChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateReplicationSlotsChange(r, old *apiv1.Cluster) field.ErrorList { newReplicationSlots := r.Spec.ReplicationSlots oldReplicationSlots := old.Spec.ReplicationSlots @@ -2262,7 +2288,7 @@ func (r *Cluster) validateReplicationSlotsChange(old *Cluster) field.ErrorList { return errs } -func (r *Cluster) validateWALLevelChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateWALLevelChange(r, old *apiv1.Cluster) field.ErrorList { var errs field.ErrorList newWALLevel := r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLevel] @@ -2279,7 +2305,7 @@ func (r *Cluster) validateWALLevelChange(old *Cluster) field.ErrorList { return errs } -func (r *Cluster) validateManagedServices() field.ErrorList { +func (v *ClusterCustomValidator) validateManagedServices(r *apiv1.Cluster) field.ErrorList { reservedNames := []string{ r.GetServiceReadWriteName(), r.GetServiceReadOnlyName(), @@ -2305,10 +2331,10 @@ func (r *Cluster) validateManagedServices() field.ErrorList { basePath := field.NewPath("spec", "managed", "services") var errs field.ErrorList - if 
slices.Contains(managedServices.DisabledDefaultServices, ServiceSelectorTypeRW) { + if slices.Contains(managedServices.DisabledDefaultServices, apiv1.ServiceSelectorTypeRW) { errs = append(errs, field.Invalid( basePath.Child("disabledDefaultServices"), - ServiceSelectorTypeRW, + apiv1.ServiceSelectorTypeRW, "service of type RW cannot be disabled.", )) } @@ -2352,7 +2378,7 @@ func (r *Cluster) validateManagedServices() field.ErrorList { func validateServiceTemplate( path *field.Path, nameRequired bool, - template ServiceTemplateSpec, + template apiv1.ServiceTemplateSpec, ) field.ErrorList { var errs field.ErrorList @@ -2372,7 +2398,7 @@ func validateServiceTemplate( } // validateManagedRoles validate the environment variables settings proposed by the user -func (r *Cluster) validateManagedRoles() field.ErrorList { +func (v *ClusterCustomValidator) validateManagedRoles(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.Managed == nil { @@ -2421,49 +2447,31 @@ func (r *Cluster) validateManagedRoles() field.ErrorList { } // validateManagedExtensions validate the managed extensions parameters set by the user -func (r *Cluster) validateManagedExtensions() field.ErrorList { +func (v *ClusterCustomValidator) validateManagedExtensions(r *apiv1.Cluster) field.ErrorList { allErrors := field.ErrorList{} - allErrors = append(allErrors, r.validatePgFailoverSlots()...) + allErrors = append(allErrors, v.validatePgFailoverSlots(r)...) return allErrors } -func (r *Cluster) validatePgFailoverSlots() field.ErrorList { +func (v *ClusterCustomValidator) validatePgFailoverSlots(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList var pgFailoverSlots postgres.ManagedExtension - for i, ext := range postgres.ManagedExtensions { - if ext.Name == "pg_failover_slots" { - pgFailoverSlots = postgres.ManagedExtensions[i] - } - } - if !pgFailoverSlots.IsUsed(r.Spec.PostgresConfiguration.Parameters) { + if !postgres.IsManagedExtensionUsed("pg_failover_slots", r.Spec.PostgresConfiguration.Parameters) { return nil } - const hotStandbyFeedbackKey = "hot_standby_feedback" - hotStandbyFeedbackActivated := false - hotStandbyFeedback, hasHotStandbyFeedback := r.Spec.PostgresConfiguration.Parameters[hotStandbyFeedbackKey] - if hasHotStandbyFeedback { - var err error - hotStandbyFeedbackActivated, err = postgres.ParsePostgresConfigBoolean(hotStandbyFeedback) - if err != nil { - result = append( - result, - field.Invalid( - field.NewPath("spec", "postgresql", "parameters", hotStandbyFeedbackKey), - hotStandbyFeedback, - fmt.Sprintf("invalid `%s` value. 
Must be a postgres boolean", hotStandbyFeedbackKey))) - } - } - - if !hotStandbyFeedbackActivated { + hotStandbyFeedback, _ := postgres.ParsePostgresConfigBoolean( + r.Spec.PostgresConfiguration.Parameters[postgres.ParameterHotStandbyFeedback]) + if !hotStandbyFeedback { result = append( result, field.Invalid( - field.NewPath("spec", "postgresql", "parameters", hotStandbyFeedbackKey), + field.NewPath("spec", "postgresql", "parameters", postgres.ParameterHotStandbyFeedback), hotStandbyFeedback, - fmt.Sprintf("`%s` must be enabled to use %s extension", hotStandbyFeedbackKey, pgFailoverSlots.Name))) + fmt.Sprintf("`%s` must be enabled to use %s extension", + postgres.ParameterHotStandbyFeedback, pgFailoverSlots.Name))) } if r.Spec.ReplicationSlots == nil { @@ -2488,11 +2496,125 @@ func (r *Cluster) validatePgFailoverSlots() field.ErrorList { return result } -func (r *Cluster) getAdmissionWarnings() admission.Warnings { - return r.getMaintenanceWindowsAdmissionWarnings() +func (v *ClusterCustomValidator) getAdmissionWarnings(r *apiv1.Cluster) admission.Warnings { + list := getMaintenanceWindowsAdmissionWarnings(r) + list = append(list, getInTreeBarmanWarnings(r)...) + list = append(list, getRetentionPolicyWarnings(r)...) + list = append(list, getStorageWarnings(r)...) + return append(list, getSharedBuffersWarnings(r)...) } -func (r *Cluster) getMaintenanceWindowsAdmissionWarnings() admission.Warnings { +func getStorageWarnings(r *apiv1.Cluster) admission.Warnings { + generateWarningsFunc := func(path field.Path, configuration *apiv1.StorageConfiguration) admission.Warnings { + if configuration == nil { + return nil + } + + if configuration.PersistentVolumeClaimTemplate == nil { + return nil + } + + pvcTemplatePath := path.Child("pvcTemplate") + + var result admission.Warnings + if configuration.StorageClass != nil && configuration.PersistentVolumeClaimTemplate.StorageClassName != nil { + storageClass := path.Child("storageClass").String() + result = append( + result, + fmt.Sprintf("%s and %s are both specified, %s value will be used.", + storageClass, + pvcTemplatePath.Child("storageClassName"), + storageClass, + ), + ) + } + requestsSpecified := !configuration.PersistentVolumeClaimTemplate.Resources.Requests.Storage().IsZero() + if configuration.Size != "" && requestsSpecified { + size := path.Child("size").String() + result = append( + result, + fmt.Sprintf( + "%s and %s are both specified, %s value will be used.", + size, + pvcTemplatePath.Child("resources", "requests", "storage").String(), + size, + ), + ) + } + + return result + } + + var result admission.Warnings + + storagePath := *field.NewPath("spec", "storage") + result = append(result, generateWarningsFunc(storagePath, &r.Spec.StorageConfiguration)...) + + walStoragePath := *field.NewPath("spec", "walStorage") + return append(result, generateWarningsFunc(walStoragePath, r.Spec.WalStorage)...) 
+} + +func getInTreeBarmanWarnings(r *apiv1.Cluster) admission.Warnings { + var result admission.Warnings + + var paths []string + + if r.Spec.Backup != nil && r.Spec.Backup.BarmanObjectStore != nil { + paths = append(paths, field.NewPath("spec", "backup", "barmanObjectStore").String()) + } + + for idx, externalCluster := range r.Spec.ExternalClusters { + if externalCluster.BarmanObjectStore != nil { + paths = append(paths, field.NewPath("spec", "externalClusters", fmt.Sprintf("%d", idx), + "barmanObjectStore").String()) + } + } + + if len(paths) > 0 { + pathsStr := strings.Join(paths, ", ") + result = append( + result, + fmt.Sprintf("Native support for Barman Cloud backups and recovery is deprecated and will be "+ + "completely removed in CloudNativePG 1.28.0. Found usage in: %s. "+ + "Please migrate existing clusters to the new Barman Cloud Plugin to ensure a smooth transition.", + pathsStr), + ) + } + return result +} + +func getRetentionPolicyWarnings(r *apiv1.Cluster) admission.Warnings { + var result admission.Warnings + + if r.Spec.Backup != nil && r.Spec.Backup.RetentionPolicy != "" && r.Spec.Backup.BarmanObjectStore == nil { + result = append( + result, + "Retention policies specified in .spec.backup.retentionPolicy are only used by the "+ + "in-tree barman-cloud support, which is not being used in this cluster. "+ + "Please use a backup plugin and migrate this configuration to the plugin configuration", + ) + } + + return result +} + +func getSharedBuffersWarnings(r *apiv1.Cluster) admission.Warnings { + var result admission.Warnings + + if v := r.Spec.PostgresConfiguration.Parameters["shared_buffers"]; v != "" { + if _, err := strconv.Atoi(v); err == nil { + result = append( + result, + fmt.Sprintf("`shared_buffers` value '%s' is missing a unit (e.g., MB, GB). "+ + "While this is currently allowed, future releases will require an explicit unit. 
"+ + "Please update your configuration to specify a valid unit, such as '%sMB'.", v, v), + ) + } + } + return result +} + +func getMaintenanceWindowsAdmissionWarnings(r *apiv1.Cluster) admission.Warnings { var result admission.Warnings if r.Spec.NodeMaintenanceWindow != nil { @@ -2504,7 +2626,7 @@ func (r *Cluster) getMaintenanceWindowsAdmissionWarnings() admission.Warnings { } // validate whether the hibernation configuration is valid -func (r *Cluster) validateHibernationAnnotation() field.ErrorList { +func (v *ClusterCustomValidator) validateHibernationAnnotation(r *apiv1.Cluster) field.ErrorList { value, ok := r.Annotations[utils.HibernationAnnotationName] isKnownValue := value == string(utils.HibernationAnnotationValueOn) || value == string(utils.HibernationAnnotationValueOff) @@ -2523,3 +2645,157 @@ func (r *Cluster) validateHibernationAnnotation() field.ErrorList { ), } } + +func (v *ClusterCustomValidator) validatePodPatchAnnotation(r *apiv1.Cluster) field.ErrorList { + jsonPatch, ok := r.Annotations[utils.PodPatchAnnotationName] + if !ok { + return nil + } + + if _, err := jsonpatch.DecodePatch([]byte(jsonPatch)); err != nil { + return field.ErrorList{ + field.Invalid( + field.NewPath("metadata", "annotations", utils.PodPatchAnnotationName), + jsonPatch, + fmt.Sprintf("error decoding JSON patch: %s", err.Error()), + ), + } + } + + if _, err := specs.NewInstance( + context.Background(), + *r, + 1, + true, + ); err != nil { + return field.ErrorList{ + field.Invalid( + field.NewPath("metadata", "annotations", utils.PodPatchAnnotationName), + jsonPatch, + fmt.Sprintf("jsonpatch doesn't apply cleanly to the pod: %s", err.Error()), + ), + } + } + + return nil +} + +func (v *ClusterCustomValidator) validatePluginConfiguration(r *apiv1.Cluster) field.ErrorList { + if len(r.Spec.Plugins) == 0 { + return nil + } + isBarmanObjectStoreConfigured := r.Spec.Backup != nil && r.Spec.Backup.BarmanObjectStore != nil + var walArchiverEnabled []string + + for _, plugin := range r.Spec.Plugins { + if !plugin.IsEnabled() { + continue + } + if plugin.IsWALArchiver != nil && *plugin.IsWALArchiver { + walArchiverEnabled = append(walArchiverEnabled, plugin.Name) + } + } + + var errorList field.ErrorList + if isBarmanObjectStoreConfigured { + if len(walArchiverEnabled) > 0 { + errorList = append(errorList, field.Invalid( + field.NewPath("spec", "plugins"), + walArchiverEnabled, + "Cannot enable a WAL archiver plugin when barmanObjectStore is configured")) + } + } + + if len(walArchiverEnabled) > 1 { + errorList = append(errorList, field.Invalid( + field.NewPath("spec", "plugins"), + walArchiverEnabled, + "Cannot enable more than one WAL archiver plugin")) + } + + return errorList +} + +func (v *ClusterCustomValidator) validateLivenessPingerProbe(r *apiv1.Cluster) field.ErrorList { + value, ok := r.Annotations[utils.LivenessPingerAnnotationName] + if !ok { + return nil + } + + _, err := apiv1.NewLivenessPingerConfigFromAnnotations(r.Annotations) + if err != nil { + return field.ErrorList{ + field.Invalid( + field.NewPath("metadata", "annotations", utils.LivenessPingerAnnotationName), + value, + fmt.Sprintf("error decoding liveness pinger config: %s", err.Error()), + ), + } + } + + return nil +} + +func (v *ClusterCustomValidator) validateExtensions(r *apiv1.Cluster) field.ErrorList { + ensureNotEmptyOrDuplicate := func(path *field.Path, list *stringset.Data, value string) *field.Error { + if value == "" { + return field.Invalid( + path, + value, + "value cannot be empty", + ) + } + + if list.Has(value) { + 
return field.Duplicate( + path, + value, + ) + } + return nil + } + + if len(r.Spec.PostgresConfiguration.Extensions) == 0 { + return nil + } + + var result field.ErrorList + + extensionNames := stringset.New() + + for i, v := range r.Spec.PostgresConfiguration.Extensions { + basePath := field.NewPath("spec", "postgresql", "extensions").Index(i) + if nameErr := ensureNotEmptyOrDuplicate(basePath.Child("name"), extensionNames, v.Name); nameErr != nil { + result = append(result, nameErr) + } + extensionNames.Put(v.Name) + + controlPaths := stringset.New() + for j, path := range v.ExtensionControlPath { + if validateErr := ensureNotEmptyOrDuplicate( + basePath.Child("extension_control_path").Index(j), + controlPaths, + path, + ); validateErr != nil { + result = append(result, validateErr) + } + + controlPaths.Put(path) + } + + libraryPaths := stringset.New() + for j, path := range v.DynamicLibraryPath { + if validateErr := ensureNotEmptyOrDuplicate( + basePath.Child("dynamic_library_path").Index(j), + libraryPaths, + path, + ); validateErr != nil { + result = append(result, validateErr) + } + + libraryPaths.Put(path) + } + } + + return result +} diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go new file mode 100644 index 0000000000..a59bab25b8 --- /dev/null +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -0,0 +1,5874 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/cloudnative-pg/barman-cloud/pkg/api" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + pgversion "github.com/cloudnative-pg/machinery/pkg/postgres/version" + storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("bootstrap methods validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain if there isn't a configuration", func() { + emptyCluster := &apiv1.Cluster{} + result := v.validateBootstrapMethod(emptyCluster) + Expect(result).To(BeEmpty()) + }) + + It("doesn't complain if we are using initdb", func() { + initdbCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + }, + } + result := v.validateBootstrapMethod(initdbCluster) + Expect(result).To(BeEmpty()) + }) + + It("doesn't complain if we are using recovery", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{}, + }, + }, + } + result := v.validateBootstrapMethod(recoveryCluster) + Expect(result).To(BeEmpty()) + }) + + It("complains where there are two active bootstrap methods", func() { + invalidCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{}, + InitDB: &apiv1.BootstrapInitDB{}, + }, + }, + } + result := v.validateBootstrapMethod(invalidCluster) + Expect(result).To(HaveLen(1)) + }) +}) + +var _ = Describe("certificates options validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain if there isn't a configuration", func() { + emptyCluster := &apiv1.Cluster{} + result := v.validateCerts(emptyCluster) + Expect(result).To(BeEmpty()) + }) + + It("doesn't complain if you specify some valid secret names", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Certificates: &apiv1.CertificatesConfiguration{ + ServerCASecret: "test-server-ca", + ServerTLSSecret: "test-server-tls", + }, + }, + } + result := v.validateCerts(cluster) + Expect(result).To(BeEmpty()) + }) + + It("does complain if you specify the TLS secret and not the CA", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Certificates: &apiv1.CertificatesConfiguration{ + ServerTLSSecret: "test-server-tls", + }, + }, + } + result := v.validateCerts(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("does complain if you specify the TLS secret and AltDNSNames is not empty", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Certificates: &apiv1.CertificatesConfiguration{ + ServerCASecret: "test-server-ca", + ServerTLSSecret: "test-server-tls", + ServerAltDNSNames: []string{"dns-name"}, + }, + }, + } + result := v.validateCerts(cluster) + Expect(result).To(HaveLen(1)) + }) +}) + +var _ = Describe("initdb options validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain if there isn't a configuration", func() { + emptyCluster := &apiv1.Cluster{} + result := v.validateInitDB(emptyCluster) + Expect(result).To(BeEmpty()) + }) + + It("complains if you specify the database name but not the owner", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + }, + }, + }, + } + + result := v.validateInitDB(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("complains if you specify the owner but not the database name", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + 
Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Owner: "app", + }, + }, + }, + } + + result := v.validateInitDB(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("doesn't complain if you specify both database name and owner user", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + }, + }, + }, + } + + result := v.validateInitDB(cluster) + Expect(result).To(BeEmpty()) + }) + + It("complains if key is missing in the secretRefs", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + SecretRefs: []apiv1.SecretKeySelector{ + { + LocalObjectReference: apiv1.LocalObjectReference{Name: "secret1"}, + }, + }, + }, + }, + }, + }, + } + + result := v.validateInitDB(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("complains if name is missing in the secretRefs", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + SecretRefs: []apiv1.SecretKeySelector{ + { + Key: "key", + }, + }, + }, + }, + }, + }, + } + + result := v.validateInitDB(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("complains if key is missing in the configMapRefs", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + ConfigMapRefs: []apiv1.ConfigMapKeySelector{ + { + LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap1"}, + }, + }, + }, + }, + }, + }, + } + + result := v.validateInitDB(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("complains if name is missing in the configMapRefs", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + ConfigMapRefs: []apiv1.ConfigMapKeySelector{ + { + Key: "key", + }, + }, + }, + }, + }, + }, + } + + result := v.validateInitDB(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("doesn't complain if configmapRefs and secretRefs are valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + ConfigMapRefs: []apiv1.ConfigMapKeySelector{ + { + LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap1"}, + Key: "key", + }, + { + LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap2"}, + Key: "key", + }, + }, + SecretRefs: []apiv1.SecretKeySelector{ + { + LocalObjectReference: apiv1.LocalObjectReference{Name: "secret1"}, + Key: "key", + }, + { + LocalObjectReference: apiv1.LocalObjectReference{Name: "secret2"}, + Key: "key", + }, + }, + }, + }, + }, + }, + } + + result := v.validateInitDB(cluster) + Expect(result).To(BeEmpty()) + }) + + It("doesn't complain if superuser secret is empty", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, + } + + result := v.validateSuperuserSecret(cluster) + + Expect(result).To(BeEmpty()) + }) + +
It("complains if superuser secret name is empty", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + SuperuserSecret: &apiv1.LocalObjectReference{ + Name: "", + }, + }, + } + + result := v.validateSuperuserSecret(cluster) + Expect(result).To(HaveLen(1)) + }) +}) + +var _ = Describe("ImagePullPolicy validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains if the imagePullPolicy isn't valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImagePullPolicy: "wrong", + }, + } + + result := v.validateImagePullPolicy(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("does not complain if the imagePullPolicy is valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImagePullPolicy: "Always", + }, + } + + result := v.validateImagePullPolicy(cluster) + Expect(result).To(BeEmpty()) + }) +}) + +var _ = Describe("Image name validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain if the user simply accepts the default", func() { + var cluster apiv1.Cluster + Expect(v.validateImageName(&cluster)).To(BeEmpty()) + + // Let's apply the defaulting webhook, too + cluster.Default() + Expect(v.validateImageName(&cluster)).To(BeEmpty()) + }) + + It("complains when the 'latest' tag is detected", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:latest", + }, + } + Expect(v.validateImageName(cluster)).To(HaveLen(1)) + }) + + It("doesn't complain when an alpha tag is used", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:15alpha1", + }, + } + Expect(v.validateImageName(cluster)).To(BeEmpty()) + }) + + It("doesn't complain when a beta tag is used", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:15beta1", + }, + } + Expect(v.validateImageName(cluster)).To(BeEmpty()) + }) + + It("doesn't complain when a release candidate tag is used", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:15rc1", + }, + } + Expect(v.validateImageName(cluster)).To(BeEmpty()) + }) + + It("complains when only the sha is passed", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866", + }, + } + Expect(v.validateImageName(cluster)).To(HaveLen(1)) + }) + + It("doesn't complain if the tag is valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + }, + } + Expect(v.validateImageName(cluster)).To(BeEmpty()) + }) + + It("doesn't complain if the tag is valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:14.4-1", + }, + } + Expect(v.validateImageName(cluster)).To(BeEmpty()) + }) + + It("doesn't complain if the tag is valid and has sha", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866", + }, + } + Expect(v.validateImageName(cluster)).To(BeEmpty()) + }) + + It("complains when the tag name is not a PostgreSQL version", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:test_12", + }, + } + Expect(v.validateImageName(cluster)).To(HaveLen(1)) + }) +}) + +var _ = DescribeTable("parsePostgresQuantityValue", + func(value string, parsedValue
resource.Quantity, expectError bool) { + quantity, err := parsePostgresQuantityValue(value) + if !expectError { + Expect(quantity, err).Should(BeComparableTo(parsedValue)) + } else { + Expect(err).Should(HaveOccurred()) + } + }, + Entry("bare", "1", resource.MustParse("1Mi"), false), + Entry("B", "1B", resource.MustParse("1"), false), + Entry("kB", "1kB", resource.MustParse("1Ki"), false), + Entry("MB", "1MB", resource.MustParse("1Mi"), false), + Entry("GB", "1GB", resource.MustParse("1Gi"), false), + Entry("TB", "1TB", resource.MustParse("1Ti"), false), + Entry("spaceB", "1 B", resource.MustParse("1"), false), + Entry("spaceMB", "1 MB", resource.MustParse("1Mi"), false), + Entry("reject kb", "1kb", resource.Quantity{}, true), + Entry("reject Mb", "1Mb", resource.Quantity{}, true), + Entry("reject G", "1G", resource.Quantity{}, true), + Entry("reject random unit", "1random", resource.Quantity{}, true), + Entry("reject non-numeric", "non-numeric", resource.Quantity{}, true), +) + +var _ = Describe("configuration change validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain when the configuration is exactly the same", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + }, + } + clusterNew := clusterOld.DeepCopy() + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + + It("doesn't complain when we change a setting which is not fixed", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "shared_buffers": "4G", + }, + }, + }, + } + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + + It("complains when changing postgres major version and settings", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.5", + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "shared_buffers": "4G", + }, + }, + }, + } + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(HaveLen(1)) + }) + + It("produces no error when WAL size settings are correct", func() { + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "80MB", + "max_wal_size": "1024", + }, + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "1500", + "max_wal_size": "2 GB", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "3Gi", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "1.5GB", + "max_wal_size": "2000", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "2Gi", + }, + StorageConfiguration: 
apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "max_wal_size": "1GB", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "2Gi", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "100MB", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "2Gi", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{}, + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) + }) + + It("produces one complaint when min_wal_size is bigger than max_wal_size", func() { + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "1500", + "max_wal_size": "1GB", + }, + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "2Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "2G", + "max_wal_size": "1GB", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "2Gi", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "4Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) + }) + + It("produces one complaint when max_wal_size is bigger than WAL storage", func() { + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "max_wal_size": "2GB", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "1G", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "4Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "80MB", + "max_wal_size": "1500", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "1G", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "4Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) + }) + + It("produces two complaints when min_wal_size is bigger than WAL storage and max_wal_size", func() { + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "3GB", + "max_wal_size": "1GB", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "2Gi", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(2)) + }) + + It("complains about invalid value for min_wal_size and max_wal_size", func() { + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: 
apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "xxx", + "max_wal_size": "1GB", + }, + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "80", + "max_wal_size": "1Gb", + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "2Gi", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) + }) + + It("doesn't compare default values for min_wal_size and max_wal_size with WalStorage", func() { + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{}, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "100Mi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "min_wal_size": "1.5GB", // default for max_wal_size is 1GB + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "2Gi", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) + + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "max_wal_size": "70M", // default for min_wal_size is 80M + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "2Gi", + }, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "4Gi", + }, + }, + } + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) + }) + + It("should detect an invalid `shared_buffers` value", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "shared_buffers": "invalid", + }, + }, + }, + } + + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) + }) + + It("should reject minimal wal_level when backup is configured", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ + BarmanCredentials: apiv1.BarmanCredentials{ + AWS: &apiv1.S3Credentials{}, + }, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "minimal", + "max_wal_senders": "0", + }, + }, + }, + } + Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) + }) + + It("should allow replica wal_level when backup is configured", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ + BarmanCredentials: apiv1.BarmanCredentials{ + AWS: &apiv1.S3Credentials{}, + }, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "replica", + }, + }, + }, + } + Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) + }) + + It("should allow logical wal_level when backup is configured", func() { + cluster := &apiv1.Cluster{ + 
Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ + BarmanCredentials: apiv1.BarmanCredentials{ + AWS: &apiv1.S3Credentials{}, + }, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "logical", + }, + }, + }, + } + Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) + }) + + It("should reject minimal wal_level when instances is greater than one", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 2, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "minimal", + "max_wal_senders": "0", + }, + }, + }, + } + + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) + }) + + It("should allow replica wal_level when instances is greater than one", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 2, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "replica", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) + }) + + It("should allow logical wal_level when instances is greater than one", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 2, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "logical", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) + }) + + It("should reject an unknown wal_level value", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "test", + }, + }, + }, + } + + errs := v.validateConfiguration(cluster) + Expect(errs).To(HaveLen(1)) + Expect(errs[0].Detail).To(ContainSubstring("unrecognized `wal_level` value - allowed values")) + }) + + It("should reject minimal if it is a replica cluster", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 1, + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "minimal", + "max_wal_senders": "0", + }, + }, + }, + } + Expect(cluster.IsReplica()).To(BeTrue()) + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) + }) + + It("should allow minimal wal_level with one instance and without archive mode", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "minimal", + "max_wal_senders": "0", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) + }) + + It("should disallow minimal wal_level with one instance, without max_wal_senders being specified", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "minimal", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) + }) + + It("rejects PostgreSQL version lower than 13", func() { + v := 
&ClusterCustomValidator{} + + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:12", + }, + } + + result := v.validateConfiguration(cluster) + + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.imageName")) + Expect(result[0].Detail).To(ContainSubstring("Unsupported PostgreSQL version")) + Expect(result[0].Detail).To(ContainSubstring("Versions 13 or newer are supported")) + }) + + It("should disallow changing wal_level to minimal for existing clusters", func() { + oldCluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "max_wal_senders": "0", + }, + }, + }, + } + oldCluster.Default() + + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "minimal", + "max_wal_senders": "0", + }, + }, + }, + } + Expect(v.validateWALLevelChange(cluster, oldCluster)).To(HaveLen(1)) + }) + + It("should allow retaining wal_level to minimal for existing clusters", func() { + oldCluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "minimal", + "max_wal_senders": "0", + }, + }, + }, + } + oldCluster.Default() + + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_level": "minimal", + "max_wal_senders": "0", + "shared_buffers": "512MB", + }, + }, + }, + } + Expect(v.validateWALLevelChange(cluster, oldCluster)).To(BeEmpty()) + }) + + Describe("wal_log_hints", func() { + It("should reject wal_log_hints set to an invalid value", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_log_hints": "foo", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) + }) + + It("should allow wal_log_hints set to off for clusters having just one instance", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_log_hints": "off", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) + }) + + It("should not allow wal_log_hints set to off for clusters having more than one instance", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_log_hints": "off", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).ToNot(BeEmpty()) + }) + + It("should allow 
wal_log_hints set to on for clusters having just one instance", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 1, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_log_hints": "on", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) + }) + + It("should allow wal_log_hints set to on for clusters having more than one instance", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.SkipWalArchiving: "enabled", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "wal_log_hints": "true", + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) + }) + }) +}) + +var _ = Describe("validate image name change", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + Context("using image name", func() { + It("doesn't complain with no changes", func() { + defaultVersion, err := pgversion.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(err).ToNot(HaveOccurred()) + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, + Status: apiv1.ClusterStatus{ + Image: versions.DefaultImageName, + PGDataImageInfo: &apiv1.ImageInfo{ + Image: versions.DefaultImageName, + MajorVersion: int(defaultVersion.Major()), + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + + It("complains if it can't upgrade between major versions", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17.0", + }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:17.0", + MajorVersion: 17, + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:16.0", + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) + }) + + It("doesn't complain if image change is valid", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17.1", + }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:17.1", + MajorVersion: 17, + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17.0", + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + }) + Context("using image catalog", func() { + It("complains on major downgrades", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: 16, + }, + }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: 16, + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: 15, + }, + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) + }) + }) + Context("changing from imageName to imageCatalogRef", func() { + It("doesn't complain when the major
is the same", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:16.1", + }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:16.1", + MajorVersion: 16, + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: 16, + }, + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + It("complains on major downgrades", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17.1", + }, + Status: apiv1.ClusterStatus{ + Image: "postgres:17.1", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:17.1", + MajorVersion: 17, + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: 16, + }, + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) + }) + It("complains going from default imageName to different major imageCatalogRef", func() { + defaultVersion, err := pgversion.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(err).ToNot(HaveOccurred()) + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, + Status: apiv1.ClusterStatus{ + Image: versions.DefaultImageName, + PGDataImageInfo: &apiv1.ImageInfo{ + Image: versions.DefaultImageName, + MajorVersion: int(defaultVersion.Major()), + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: 16, + }, + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) + }) + It("doesn't complain going from default imageName to same major imageCatalogRef", func() { + defaultImageRef := reference.New(versions.DefaultImageName) + version, err := pgversion.FromTag(defaultImageRef.Tag) + Expect(err).ToNot(HaveOccurred()) + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: int(version.Major()), + }, + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + }) + + Context("changing from imageCatalogRef to imageName", func() { + It("doesn't complain when the major is the same", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: 17, + }, + }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: 17, + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17.1", + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + It("complains on major downgrades", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: 
17, + }, + }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: 17, + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:16.1", + }, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) + }) + It("complains going from imageCatalogRef to lower major default imageName", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: 18, + }, + }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: 18, + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) + }) + It("doesn't complain going from imageCatalogRef to same major default imageName", func() { + imageNameRef := reference.New(versions.DefaultImageName) + version, err := pgversion.FromTag(imageNameRef.Tag) + Expect(err).ToNot(HaveOccurred()) + + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ + TypedLocalObjectReference: corev1.TypedLocalObjectReference{ + Name: "test", + Kind: "ImageCatalog", + }, + Major: int(version.Major()), + }, + }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: int(version.Major()), + }, + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, + } + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + }) +}) + +var _ = Describe("recovery target", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("is mutually exclusive", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "", + TargetXID: "", + TargetName: "", + TargetLSN: "1/1", + TargetTime: "2021-09-01 10:22:47.000000+06", + TargetImmediate: nil, + Exclusive: nil, + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) + }) + + It("Requires BackupID to perform PITR with TargetName", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + BackupID: "20220616T031500", + TargetTLI: "", + TargetXID: "", + TargetName: "restore_point_1", + TargetLSN: "", + TargetTime: "", + TargetImmediate: nil, + Exclusive: nil, + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) + }) + + It("Fails when no BackupID is provided to perform PITR with TargetXID", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + BackupID: "", + TargetTLI: "", + TargetXID: "1/1", + TargetName: "", + TargetLSN: "", + TargetTime: "", + TargetImmediate: nil, + Exclusive: nil, + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) + }) + + It("TargetTime's format as `YYYY-MM-DD HH24:MI:SS.FF6TZH` is valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: 
&apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "", + TargetXID: "", + TargetName: "", + TargetLSN: "", + TargetTime: "2021-09-01 10:22:47.000000+06", + TargetImmediate: nil, + Exclusive: nil, + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) + }) + + It("TargetTime's format as `YYYY-MM-DD HH24:MI:SS.FF6TZH:TZM` is valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "", + TargetXID: "", + TargetName: "", + TargetLSN: "", + TargetTime: "2021-09-01 10:22:47.000000+06:00", + TargetImmediate: nil, + Exclusive: nil, + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) + }) + + It("TargetTime's format as `YYYY-MM-DD HH24:MI:SS.FF6 TZH:TZM` is invalid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "", + TargetXID: "", + TargetName: "", + TargetLSN: "", + TargetTime: "2021-09-01 10:22:47.000000 +06:00", + TargetImmediate: nil, + Exclusive: nil, + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) + }) + + It("raises errors for invalid LSN", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "", + TargetXID: "", + TargetName: "", + TargetLSN: "28734982739847293874823974928738423/987429837498273498723984723", + TargetTime: "", + TargetImmediate: nil, + Exclusive: nil, + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) + }) + + It("valid LSN", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "", + TargetXID: "", + TargetName: "", + TargetLSN: "1/1", + TargetTime: "", + TargetImmediate: nil, + Exclusive: nil, + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) + }) + + It("can be specified", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTime: "2020-01-01 01:01:00", + }, + }, + }, + }, + } + + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) + }) + + When("recoveryTLI is specified", func() { + It("allows 'latest'", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "latest", + }, + }, + }, + }, + } + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) + }) + + It("allows a positive integer", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "23", + }, + }, + }, + }, + } + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) + }) + + It("prevents 0 value", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: 
"0", + }, + }, + }, + }, + } + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) + }) + + It("prevents negative values", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "-5", + }, + }, + }, + }, + } + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) + }) + + It("prevents everything else beside the empty string", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTLI: "I don't remember", + }, + }, + }, + }, + } + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) + }) + }) +}) + +var _ = Describe("primary update strategy", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("allows 'unsupervised'", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategyUnsupervised, + Instances: 3, + }, + } + Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty()) + }) + + It("allows 'supervised'", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategySupervised, + Instances: 3, + }, + } + Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty()) + }) + + It("prevents 'supervised' for single-instance clusters", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategySupervised, + Instances: 1, + }, + } + Expect(v.validatePrimaryUpdateStrategy(cluster)).ToNot(BeEmpty()) + }) + + It("allows 'unsupervised' for single-instance clusters", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategyUnsupervised, + Instances: 1, + }, + } + Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty()) + }) + + It("prevents everything else", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: "maybe", + Instances: 3, + }, + } + Expect(v.validatePrimaryUpdateStrategy(cluster)).ToNot(BeEmpty()) + }) +}) + +var _ = Describe("Number of synchronous replicas", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + Context("new-style configuration", func() { + It("can't have both new-style configuration and legacy one", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 3, + MinSyncReplicas: 1, + MaxSyncReplicas: 2, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ + Number: 2, + }, + }, + }, + } + Expect(v.validateConfiguration(cluster)).ToNot(BeEmpty()) + }) + }) + + Context("legacy configuration", func() { + It("should be a positive integer", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 3, + MaxSyncReplicas: -3, + }, + } + Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty()) + }) + + It("should not be equal than the number of replicas", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 3, + MaxSyncReplicas: 3, + }, + } + Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty()) + }) + + It("should not be greater than the number of replicas", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 3, + MaxSyncReplicas: 5, + }, + } + 
Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty()) + }) + + It("can be zero", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 3, + MaxSyncReplicas: 0, + }, + } + Expect(v.validateMaxSyncReplicas(cluster)).To(BeEmpty()) + }) + + It("can be lower than the number of replicas", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 3, + MaxSyncReplicas: 2, + }, + } + Expect(v.validateMaxSyncReplicas(cluster)).To(BeEmpty()) + }) + }) +}) + +var _ = Describe("validateSynchronousReplicaConfiguration", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("returns no error when synchronous configuration is nil", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: nil, + }, + }, + } + errors := v.validateSynchronousReplicaConfiguration(cluster) + Expect(errors).To(BeEmpty()) + }) + + It("returns an error when number of synchronous replicas is greater than the total instances and standbys", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 2, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ + Number: 5, + StandbyNamesPost: []string{"standby1"}, + StandbyNamesPre: []string{"standby2"}, + }, + }, + }, + } + errors := v.validateSynchronousReplicaConfiguration(cluster) + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Detail).To( + Equal("Invalid synchronous configuration: the number of synchronous replicas must be less than the " + + "total number of instances and the provided standby names.")) + }) + + It("returns an error when number of synchronous replicas is equal to total instances and standbys", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 3, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ + Number: 5, + StandbyNamesPost: []string{"standby1"}, + StandbyNamesPre: []string{"standby2"}, + }, + }, + }, + } + errors := v.validateSynchronousReplicaConfiguration(cluster) + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Detail).To(Equal("Invalid synchronous configuration: the number of synchronous replicas " + + "must be less than the total number of instances and the provided standby names.")) + }) + + It("returns no error when number of synchronous replicas is less than total instances and standbys", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Instances: 2, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ + Number: 2, + StandbyNamesPost: []string{"standby1"}, + StandbyNamesPre: []string{"standby2"}, + }, + }, + }, + } + errors := v.validateSynchronousReplicaConfiguration(cluster) + Expect(errors).To(BeEmpty()) + }) +}) + +var _ = Describe("storage configuration validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains if the size is being reduced", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1G", + }, + }, + } + + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "512M", + }, + }, + } + + Expect(v.validateStorageChange(clusterNew, clusterOld)).ToNot(BeEmpty()) + }) + + It("does not complain if nothing has been changed", 
func() { + one := "one" + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1G", + StorageClass: &one, + }, + }, + } + + clusterNew := clusterOld.DeepCopy() + + Expect(v.validateStorageChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + + It("works fine if the size is being enlarged", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "8G", + }, + }, + } + + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10G", + }, + }, + } + + Expect(v.validateStorageChange(clusterNew, clusterOld)).To(BeEmpty()) + }) +}) + +var _ = Describe("Cluster name validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("should be a valid DNS label", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test.one", + }, + } + Expect(v.validateName(cluster)).ToNot(BeEmpty()) + }) + + It("should not be too long", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghi" + + "abcdefghi" + + "abcdefghi" + + "abcdefghi" + + "abcdefghi" + + "abcdefghi" + + "abcdefghi" + + "abcdefghi" + + "abcdefghi", + }, + } + Expect(v.validateName(cluster)).ToNot(BeEmpty()) + }) + + It("should not raise errors when the name is ok", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghi" + + "abcdefghi" + + "abcdefghi" + + "abcdefghi", + }, + } + Expect(v.validateName(cluster)).To(BeEmpty()) + }) + + It("should return errors when the name is not DNS-1035 compliant", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "4b96d026-a956-47eb-bae8-a99b840805c3", + }, + } + Expect(v.validateName(cluster)).NotTo(BeEmpty()) + }) + + It("should return errors when the name length is greater than 50", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("toomuchlong", 4) + "-" + "after4times", + }, + } + Expect(v.validateName(cluster)).NotTo(BeEmpty()) + }) + + It("should return errors when having a name with dots", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "wrong.name", + }, + } + Expect(v.validateName(cluster)).NotTo(BeEmpty()) + }) +}) + +var _ = Describe("validation of the list of external clusters", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("is correct when it's empty", func() { + cluster := &apiv1.Cluster{} + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) + }) + + It("complains when the list of clusters contains duplicates", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "one", + ConnectionParameters: map[string]string{ + "dbname": "postgres", + }, + }, + { + Name: "one", + ConnectionParameters: map[string]string{ + "dbname": "postgres", + }, + }, + }, + }, + } + Expect(v.validateExternalClusters(cluster)).ToNot(BeEmpty()) + }) + + It("should not raise errors if the cluster name is unique", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "one", + ConnectionParameters: map[string]string{ + "dbname": "postgres", + }, + }, + { + Name: "two", + ConnectionParameters: map[string]string{ + "dbname": "postgres", + }, + }, + }, + }, + } + 
Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) + }) +}) + +var _ = Describe("validation of an external cluster", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("ensures that one of connectionParameters and barmanObjectStore is set", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ + {}, + }, + }, + } + Expect(v.validateExternalClusters(cluster)).To(Not(BeEmpty())) + + cluster.Spec.ExternalClusters[0].ConnectionParameters = map[string]string{ + "dbname": "postgres", + } + cluster.Spec.ExternalClusters[0].BarmanObjectStore = nil + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) + + cluster.Spec.ExternalClusters[0].ConnectionParameters = nil + cluster.Spec.ExternalClusters[0].BarmanObjectStore = &apiv1.BarmanObjectStoreConfiguration{} + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) + }) +}) + +var _ = Describe("bootstrap base backup validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains if you specify the database name but not the owner for pg_basebackup", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{ + Database: "app", + }, + }, + }, + } + + result := v.validatePgBaseBackupApplicationDatabase(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("complains if you specify the owner but not the database name for pg_basebackup", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{ + Owner: "app", + }, + }, + }, + } + + result := v.validatePgBaseBackupApplicationDatabase(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("doesn't complain if you specify both database name and owner user for pg_basebackup", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{ + Database: "app", + Owner: "app", + }, + }, + }, + } + + result := v.validatePgBaseBackupApplicationDatabase(cluster) + Expect(result).To(BeEmpty()) + }) + + It("doesn't complain if we are not bootstrapping using pg_basebackup", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{}, + }, + } + result := v.validateBootstrapPgBaseBackupSource(recoveryCluster) + Expect(result).To(BeEmpty()) + }) + + It("complains when the source cluster doesn't exist", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{ + Source: "test", + }, + }, + }, + } + result := v.validateBootstrapPgBaseBackupSource(recoveryCluster) + Expect(result).ToNot(BeEmpty()) + }) +}) + +var _ = Describe("bootstrap recovery validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains if you specify the database name but not the owner for recovery", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Database: "app", + }, + }, + }, + } + + result := v.validateRecoveryApplicationDatabase(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("complains if you specify the owner but not the database name for recovery", 
func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Owner: "app", + }, + }, + }, + } + + result := v.validateRecoveryApplicationDatabase(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("doesn't complain if you specify both database name and owner user for recovery", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Database: "app", + Owner: "app", + }, + }, + }, + } + + result := v.validateRecoveryApplicationDatabase(cluster) + Expect(result).To(BeEmpty()) + }) + + Context("does not complain when bootstrap recovery source matches one of the names of external clusters", func() { + It("using a barman object store configuration", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Source: "test", + }, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + BarmanObjectStore: &api.BarmanObjectStoreConfiguration{}, + }, + }, + }, + } + errorsList := v.validateBootstrapRecoverySource(recoveryCluster) + Expect(errorsList).To(BeEmpty()) + }) + + It("using a plugin configuration", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Source: "test", + }, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + PluginConfiguration: &apiv1.PluginConfiguration{}, + }, + }, + }, + } + errorsList := v.validateBootstrapRecoverySource(recoveryCluster) + Expect(errorsList).To(BeEmpty()) + }) + }) + + It("complains when bootstrap recovery source does not match one of the names of external clusters", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Source: "test", + }, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "another-test", + }, + }, + }, + } + errorsList := v.validateBootstrapRecoverySource(recoveryCluster) + Expect(errorsList).ToNot(BeEmpty()) + }) + + It("complains when bootstrap recovery source has neither a BarmanObjectStore nor a plugin configuration", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Source: "test", + }, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + errorsList := v.validateBootstrapRecoverySource(recoveryCluster) + Expect(errorsList).To(HaveLen(1)) + }) +}) + +var _ = Describe("toleration validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain if we provide a proper toleration", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{ + Tolerations: []corev1.Toleration{ + { + Key: "test", + Operator: "Exists", + Effect: "NoSchedule", + }, + }, + }, + }, + } + result := v.validateTolerations(recoveryCluster) + Expect(result).To(BeEmpty()) + }) + + It("complains when the toleration has an empty key with the Equal operator", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{ + Tolerations: []corev1.Toleration{ + { + Key: "", + Operator: "Equal", + Effect: "NoSchedule", + }, + }, + }, + }, + } + result := 
v.validateTolerations(recoveryCluster) + Expect(result).ToNot(BeEmpty()) + }) +}) + +var _ = Describe("validate anti-affinity", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain if we provide an empty affinity section", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{}, + }, + } + result := v.validateAntiAffinity(cluster) + Expect(result).To(BeEmpty()) + }) + It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity enabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{ + EnablePodAntiAffinity: ptr.To(true), + PodAntiAffinityType: "required", + }, + }, + } + result := v.validateAntiAffinity(cluster) + Expect(result).To(BeEmpty()) + }) + + It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity disabled", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{ + EnablePodAntiAffinity: ptr.To(false), + PodAntiAffinityType: "required", + }, + }, + } + result := v.validateAntiAffinity(recoveryCluster) + Expect(result).To(BeEmpty()) + }) + + It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity enabled and preferred type", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{ + EnablePodAntiAffinity: ptr.To(true), + PodAntiAffinityType: "preferred", + }, + }, + } + result := v.validateAntiAffinity(recoveryCluster) + Expect(result).To(BeEmpty()) + }) + It("doesn't complain if we provide a proper PodAntiAffinity default with anti-affinity enabled", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{ + EnablePodAntiAffinity: ptr.To(true), + PodAntiAffinityType: "", + }, + }, + } + result := v.validateAntiAffinity(recoveryCluster) + Expect(result).To(BeEmpty()) + }) + + It("complains if we provide a wrong PodAntiAffinity with anti-affinity disabled", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{ + EnablePodAntiAffinity: ptr.To(false), + PodAntiAffinityType: "error", + }, + }, + } + result := v.validateAntiAffinity(recoveryCluster) + Expect(result).NotTo(BeEmpty()) + }) + + It("complains if we provide a wrong PodAntiAffinity with anti-affinity enabled", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Affinity: apiv1.AffinityConfiguration{ + EnablePodAntiAffinity: ptr.To(true), + PodAntiAffinityType: "error", + }, + }, + } + result := v.validateAntiAffinity(recoveryCluster) + Expect(result).NotTo(BeEmpty()) + }) +}) + +var _ = Describe("validation of the list of external clusters", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("is correct when it's empty", func() { + cluster := &apiv1.Cluster{} + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) + }) + + It("complains when the list of servers contains duplicates", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "one", + ConnectionParameters: map[string]string{}, + }, + { + Name: "one", + ConnectionParameters: map[string]string{}, + }, + }, + }, + } + Expect(v.validateExternalClusters(cluster)).ToNot(BeEmpty()) + }) + + It("should not raise errors if the server name is unique", func() { + cluster := 
&apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "one", + ConnectionParameters: map[string]string{}, + }, + { + Name: "two", + ConnectionParameters: map[string]string{}, + }, + }, + }, + } + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) + }) +}) + +var _ = Describe("bootstrap base backup validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains when the source cluster doesn't exist", func() { + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{ + Source: "test", + }, + }, + }, + } + result := v.validateBootstrapPgBaseBackupSource(recoveryCluster) + Expect(result).ToNot(BeEmpty()) + }) +}) + +var _ = Describe("unix permissions identifiers change validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains if the PostgresGID is changed", func() { + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresGID: apiv1.DefaultPostgresGID, + }, + } + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresGID: 53, + }, + } + Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).NotTo(BeEmpty()) + }) + + It("complains if the PostgresUID is changed", func() { + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresUID: apiv1.DefaultPostgresUID, + }, + } + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresUID: 74, + }, + } + Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).NotTo(BeEmpty()) + }) + + It("should not complain if the values haven't been changed", func() { + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresUID: 74, + PostgresGID: 76, + }, + } + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresUID: 74, + PostgresGID: 76, + }, + } + Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).To(BeEmpty()) + }) +}) + +var _ = Describe("promotion token validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains if the replica token is not formatted in base64", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(false), + Source: "test", + PromotionToken: "this-is-a-wrong-token", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + + result := v.validatePromotionToken(cluster) + Expect(result).ToNot(BeEmpty()) + }) + + It("complains if the replica token is not valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(false), + Source: "test", + PromotionToken: base64.StdEncoding.EncodeToString([]byte("{}")), + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + + result := v.validatePromotionToken(cluster) + Expect(result).ToNot(BeEmpty()) + }) + + It("doesn't complain if the replica token is valid", func() { + tokenContent := utils.PgControldataTokenContent{ + 
LatestCheckpointTimelineID: "3", + REDOWALFile: "this-wal-file", + DatabaseSystemIdentifier: "231231212", + LatestCheckpointREDOLocation: "33322232", + TimeOfLatestCheckpoint: "we don't know", + OperatorVersion: "version info", + } + jsonToken, err := json.Marshal(tokenContent) + Expect(err).ToNot(HaveOccurred()) + + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(false), + Source: "test", + PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + + result := v.validatePromotionToken(cluster) + Expect(result).To(BeEmpty()) + }) + + It("complains if the token is set on a replica cluster (enabled)", func() { + tokenContent := utils.PgControldataTokenContent{ + LatestCheckpointTimelineID: "1", + REDOWALFile: "0000000100000001000000A1", + DatabaseSystemIdentifier: "231231212", + LatestCheckpointREDOLocation: "0/1000000", + TimeOfLatestCheckpoint: "we don't know", + OperatorVersion: "version info", + } + jsonToken, err := json.Marshal(tokenContent) + Expect(err).ToNot(HaveOccurred()) + + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Source: "test", + PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), + }, + }, + } + + result := v.validatePromotionToken(cluster) + Expect(result).NotTo(BeEmpty()) + }) + + It("complains if the token is set on a replica cluster (primary, default name)", func() { + tokenContent := utils.PgControldataTokenContent{ + LatestCheckpointTimelineID: "1", + REDOWALFile: "0000000100000001000000A1", + DatabaseSystemIdentifier: "231231212", + LatestCheckpointREDOLocation: "0/1000000", + TimeOfLatestCheckpoint: "we don't know", + OperatorVersion: "version info", + } + jsonToken, err := json.Marshal(tokenContent) + Expect(err).ToNot(HaveOccurred()) + + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test2", + }, + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Primary: "test", + Source: "test", + PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), + }, + }, + } + + result := v.validatePromotionToken(cluster) + Expect(result).NotTo(BeEmpty()) + }) + + It("complains if the token is set on a replica cluster (primary, self)", func() { + tokenContent := utils.PgControldataTokenContent{ + LatestCheckpointTimelineID: "1", + REDOWALFile: "0000000100000001000000A1", + DatabaseSystemIdentifier: "231231212", + LatestCheckpointREDOLocation: "0/1000000", + TimeOfLatestCheckpoint: "we don't know", + OperatorVersion: "version info", + } + jsonToken, err := json.Marshal(tokenContent) + Expect(err).ToNot(HaveOccurred()) + + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Primary: "test", + Self: "test2", + Source: "test", + PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), + }, + }, + } + + result := v.validatePromotionToken(cluster) + Expect(result).NotTo(BeEmpty()) + }) + + It("complains if the token is set when minApplyDelay is being used", func() { + tokenContent := utils.PgControldataTokenContent{ + LatestCheckpointTimelineID: "1", + REDOWALFile: "0000000100000001000000A1", + DatabaseSystemIdentifier: "231231212", + LatestCheckpointREDOLocation: "0/1000000", + TimeOfLatestCheckpoint: "we don't know", + 
OperatorVersion: "version info", + } + jsonToken, err := json.Marshal(tokenContent) + Expect(err).ToNot(HaveOccurred()) + + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Primary: "test", + Self: "test", + Source: "test", + PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), + MinApplyDelay: &metav1.Duration{ + Duration: 1 * time.Hour, + }, + }, + }, + } + + result := v.validatePromotionToken(cluster) + Expect(result).NotTo(BeEmpty()) + }) +}) + +var _ = Describe("replica mode validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains if the bootstrap method is not specified", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Source: "test", + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + Expect(v.validateReplicaMode(cluster)).ToNot(BeEmpty()) + }) + + It("complains if the initdb bootstrap method is used", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + Expect(v.validateReplicaMode(cluster)).ToNot(BeEmpty()) + }) + + It("doesn't complain about initdb if we enable replica mode on an existing cluster", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "existing", + }, + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + result := v.validateReplicaMode(cluster) + Expect(result).To(BeEmpty()) + }) + + It("should complain if enabled is set to off during a transition", func() { + old := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "existing", + }, + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + Status: apiv1.ClusterStatus{ + SwitchReplicaClusterStatus: apiv1.SwitchReplicaClusterStatus{ + InProgress: true, + }, + }, + } + + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "existing", + }, + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(false), + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + Status: apiv1.ClusterStatus{ + SwitchReplicaClusterStatus: apiv1.SwitchReplicaClusterStatus{ + InProgress: true, + }, + }, + } + + result := v.validateReplicaClusterChange(cluster, old) + Expect(result).To(HaveLen(1)) + Expect(result[0].Type).To(Equal(field.ErrorTypeForbidden)) + Expect(result[0].Field).To(Equal("spec.replica.enabled")) + }) + + It("is valid when the pg_basebackup bootstrap option is used", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + 
ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + result := v.validateReplicaMode(cluster) + Expect(result).To(BeEmpty()) + }) + + It("is valid when the restore bootstrap option is used", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + result := v.validateReplicaMode(cluster) + Expect(result).To(BeEmpty()) + }) + + It("complains when the primary field is used with the enabled field", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Primary: "toast", + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, + }, + ExternalClusters: []apiv1.ExternalCluster{}, + }, + } + result := v.validateReplicaMode(cluster) + Expect(result).ToNot(BeEmpty()) + }) + + It("doesn't complain when the enabled field is not specified", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + }, + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Primary: "test", + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + result := v.validateReplicaMode(cluster) + Expect(result).To(BeEmpty()) + }) + + It("doesn't complain when creating a new primary cluster with the replication stanza set", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Primary: "test", + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + result := v.validateReplicaMode(cluster) + Expect(result).To(BeEmpty()) + }) +}) + +var _ = Describe("validate the replica cluster external clusters", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains when the external cluster doesn't exist (source)", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, + }, + ExternalClusters: []apiv1.ExternalCluster{}, + }, + } + + cluster.Spec.Bootstrap.PgBaseBackup = nil + result := v.validateReplicaClusterExternalClusters(cluster) + Expect(result).ToNot(BeEmpty()) + }) + + It("complains when the external cluster doesn't exist (primary)", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Primary: "test2", + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + 
}, + } + + result := v.validateReplicaClusterExternalClusters(cluster) + Expect(result).ToNot(BeEmpty()) + }) + + It("complains when the external cluster doesn't exist (self)", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ + Self: "test2", + Primary: "test", + Source: "test", + }, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: "test", + }, + }, + }, + } + + result := v.validateReplicaClusterExternalClusters(cluster) + Expect(result).ToNot(BeEmpty()) + }) +}) + +var _ = Describe("Validation changes", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain if the given old cluster is nil", func() { + newCluster := &apiv1.Cluster{} + err := v.validateClusterChanges(newCluster, nil) + Expect(err).To(BeNil()) + }) +}) + +var _ = Describe("Backup validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("complains if there are no credentials", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}, + }, + }, + } + err := v.validateBackupConfiguration(cluster) + Expect(err).To(HaveLen(1)) + }) +}) + +var _ = Describe("Backup retention policy validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("doesn't complain if no policy is provided", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{}, + }, + } + err := v.validateRetentionPolicy(cluster) + Expect(err).To(BeEmpty()) + }) + + It("doesn't complain if the given policy is valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + RetentionPolicy: "90d", + }, + }, + } + err := v.validateRetentionPolicy(cluster) + Expect(err).To(BeEmpty()) + }) + + It("complains if a given policy is not valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + RetentionPolicy: "09", + }, + }, + } + err := v.validateRetentionPolicy(cluster) + Expect(err).To(HaveLen(1)) + }) +}) + +var _ = Describe("validation of imports", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("rejects unrecognized import type", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: "fooBar", + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("rejects microservice import with roles", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MicroserviceSnapshotType, + Databases: []string{"foo"}, + Roles: []string{"bar"}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("rejects microservice import without exactly one database", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: 
"app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MicroserviceSnapshotType, + Databases: []string{"foo", "bar"}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("rejects microservice import with a wildcard on the database name", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MicroserviceSnapshotType, + Databases: []string{"*foo"}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("accepts microservice import when well specified", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MicroserviceSnapshotType, + Databases: []string{"foo"}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(BeEmpty()) + }) + + It("rejects monolith import with no databases", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, + Databases: []string{}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("rejects monolith import with PostImport Application SQL", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, + Databases: []string{"foo"}, + PostImportApplicationSQL: []string{"select * from bar"}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("rejects monolith import with wildcards alongside specific values", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, + Databases: []string{"bar", "*"}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(HaveLen(1)) + + cluster = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, + Databases: []string{"foo"}, + Roles: []string{"baz", "*"}, + }, + }, + }, + }, + } + + result = v.validateImport(cluster) + Expect(result).To(HaveLen(1)) + }) + + It("accepts monolith import with proper values", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, + Databases: []string{"foo"}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(BeEmpty()) + }) + + It("accepts monolith import with wildcards", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Database: "app", + Owner: "app", + Import: &apiv1.Import{ + Type: 
apiv1.MonolithSnapshotType, + Databases: []string{"*"}, + Roles: []string{"*"}, + }, + }, + }, + }, + } + + result := v.validateImport(cluster) + Expect(result).To(BeEmpty()) + }) +}) + +var _ = Describe("validation of replication slots configuration", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("can be enabled on the default PostgreSQL image", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(true), + }, + UpdateInterval: 0, + }, + }, + } + cluster.Default() + + result := v.validateReplicationSlots(cluster) + Expect(result).To(BeEmpty()) + }) + + It("sets replicationSlots by default", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + }, + } + cluster.Default() + Expect(cluster.Spec.ReplicationSlots).ToNot(BeNil()) + Expect(cluster.Spec.ReplicationSlots.HighAvailability).ToNot(BeNil()) + Expect(cluster.Spec.ReplicationSlots.HighAvailability.Enabled).To(HaveValue(BeTrue())) + + result := v.validateReplicationSlots(cluster) + Expect(result).To(BeEmpty()) + }) + + It("sets replicationSlots.highAvailability by default", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + UpdateInterval: 30, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.ReplicationSlots.HighAvailability).ToNot(BeNil()) + Expect(cluster.Spec.ReplicationSlots.HighAvailability.Enabled).To(HaveValue(BeTrue())) + + result := v.validateReplicationSlots(cluster) + Expect(result).To(BeEmpty()) + }) + + It("allows enabling replication slots on the fly", func() { + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(false), + }, + }, + }, + } + oldCluster.Default() + + newCluster := oldCluster.DeepCopy() + newCluster.Spec.ReplicationSlots = &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(true), + SlotPrefix: "_test_", + }, + } + + Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(BeEmpty()) + }) + + It("prevents changing the slot prefix while replication slots are enabled", func() { + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(true), + SlotPrefix: "_test_", + }, + }, + }, + } + oldCluster.Default() + + newCluster := oldCluster.DeepCopy() + newCluster.Spec.ReplicationSlots.HighAvailability.SlotPrefix = "_toast_" + Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(HaveLen(1)) + }) + + It("prevents removing the replication slot section when replication slots are enabled", func() { + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(true), + SlotPrefix: "_test_", + }, + }, + }, + } + oldCluster.Default() + + newCluster := oldCluster.DeepCopy() + 
newCluster.Spec.ReplicationSlots = nil + Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(HaveLen(1)) + }) + + It("allows disabling the replication slots", func() { + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(true), + SlotPrefix: "_test_", + }, + }, + }, + } + oldCluster.Default() + + newCluster := oldCluster.DeepCopy() + newCluster.Spec.ReplicationSlots.HighAvailability.Enabled = ptr.To(false) + Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(BeEmpty()) + }) + + It("should return an error when SynchronizeReplicasConfiguration is not nil and has invalid regex", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{ + ExcludePatterns: []string{"([a-zA-Z]+"}, + }, + }, + }, + } + errors := v.validateReplicationSlots(cluster) + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Detail).To(Equal("Cannot configure synchronizeReplicas. Invalid regexes were found")) + }) + + It("should not return an error when SynchronizeReplicasConfiguration is not nil and regex is valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{ + ExcludePatterns: []string{"validpattern"}, + }, + }, + }, + } + errors := v.validateReplicationSlots(cluster) + Expect(errors).To(BeEmpty()) + }) + + It("should not return an error when SynchronizeReplicasConfiguration is nil", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: versions.DefaultImageName, + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + SynchronizeReplicas: nil, + }, + }, + } + errors := v.validateReplicationSlots(cluster) + Expect(errors).To(BeEmpty()) + }) + + It("returns no errors when synchronizeLogicalDecoding is disabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: false, + }, + }, + }, + } + + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(BeNil()) + }) + + It("returns no errors when pg_failover_slots is enabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "hot_standby_feedback": "true", + "pg_failover_slots.synchronize_slot_names": "name_like:%", + }, + }, + }, + } + + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(BeNil()) + }) + + It("returns an error when Postgres version is < 17 and pg_failover_slots is not enabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:16", + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + }, + } + result := v.validateSynchronizeLogicalDecoding(cluster) + 
Expect(result).To(HaveLen(1)) + Expect(result[0].Error()).To(ContainSubstring("pg_failover_slots extension must be enabled")) + }) + + It("returns an error when Postgres version is 17 and hot_standby_feedback is disabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17", + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "sync_replication_slots": "on", + "hot_standby_feedback": "off", + }, + }, + }, + } + + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(HaveLen(1)) + Expect(result[0].Error()).To(ContainSubstring("`hot_standby_feedback` must be enabled")) + }) + + It("returns an error when Postgres version is 17 and sync_replication_slots is disabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17", + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "hot_standby_feedback": "on", + "sync_replication_slots": "off", + }, + }, + }, + } + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(HaveLen(1)) + Expect(result[0].Error()).To(ContainSubstring( + "`sync_replication_slots` setting or pg_failover_slots extension must be enabled")) + }) + + It("returns no errors when all conditions are met", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17", + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "hot_standby_feedback": "on", + "sync_replication_slots": "on", + }, + }, + }, + } + + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(BeEmpty()) + }) +}) + +var _ = Describe("Environment variables validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + When("an environment variable is given", func() { + It("detects if it is valid", func() { + Expect(isReservedEnvironmentVariable("PGDATA")).To(BeTrue()) + }) + + It("detects if it is not valid", func() { + Expect(isReservedEnvironmentVariable("LC_ALL")).To(BeFalse()) + }) + }) + + When("a ClusterSpec is given", func() { + It("detects if the environment variable list is correct", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Env: []corev1.EnvVar{ + { + Name: "TZ", + Value: "Europe/Rome", + }, + }, + }, + } + + Expect(v.validateEnv(cluster)).To(BeEmpty()) + }) + + It("detects if the environment variable list contains a reserved variable", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Env: []corev1.EnvVar{ + { + Name: "TZ", + Value: "Europe/Rome", + }, + { + Name: "PGDATA", + Value: "/tmp", + }, + }, + }, + } + + Expect(v.validateEnv(cluster)).To(HaveLen(1)) + }) + }) +}) + +var _ = Describe("Storage configuration validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + When("a ClusterSpec is given", func() { + It("produces one error if storage is not set at all", func() { + cluster := 
+				Spec: apiv1.ClusterSpec{
+					StorageConfiguration: apiv1.StorageConfiguration{},
+				},
+			}
+			Expect(v.validateStorageSize(cluster)).To(HaveLen(1))
+		})
+
+		It("succeeds if storage size is set", func() {
+			cluster := &apiv1.Cluster{
+				Spec: apiv1.ClusterSpec{
+					StorageConfiguration: apiv1.StorageConfiguration{
+						Size: "1G",
+					},
+				},
+			}
+			Expect(v.validateStorageSize(cluster)).To(BeEmpty())
+		})
+
+		It("succeeds if storage is not set but a pvc template specifies storage", func() {
+			cluster := &apiv1.Cluster{
+				Spec: apiv1.ClusterSpec{
+					StorageConfiguration: apiv1.StorageConfiguration{
+						PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+							Resources: corev1.VolumeResourceRequirements{
+								Requests: corev1.ResourceList{"storage": resource.MustParse("1Gi")},
+							},
+						},
+					},
+				},
+			}
+			Expect(v.validateStorageSize(cluster)).To(BeEmpty())
+		})
+	})
+})
+
+var _ = Describe("Ephemeral volume configuration validation", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	It("succeeds if no ephemeral configuration is present", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{},
+		}
+		Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty())
+	})
+
+	It("succeeds if ephemeralVolumeSource is set", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
+			},
+		}
+		Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty())
+	})
+
+	It("succeeds if ephemeralVolumesSizeLimit.temporaryData is set", func() {
+		onegi := resource.MustParse("1Gi")
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{
+					TemporaryData: &onegi,
+				},
+			},
+		}
+		Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty())
+	})
+
+	It("succeeds if ephemeralVolumeSource and ephemeralVolumesSizeLimit.shm are set", func() {
+		onegi := resource.MustParse("1Gi")
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
+				EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{
+					Shm: &onegi,
+				},
+			},
+		}
+		Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty())
+	})
+
+	It("produces one error if conflicting ephemeral storage options are set", func() {
+		onegi := resource.MustParse("1Gi")
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
+				EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{
+					TemporaryData: &onegi,
+				},
+			},
+		}
+		Expect(v.validateEphemeralVolumeSource(cluster)).To(HaveLen(1))
+	})
+})
+
+var _ = Describe("Role management validation", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	It("should succeed if there is no management stanza", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{},
+		}
+		Expect(v.validateManagedRoles(cluster)).To(BeEmpty())
+	})
+
+	It("should succeed if the role defined is not reserved", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Managed: &apiv1.ManagedConfiguration{
+					Roles: []apiv1.RoleConfiguration{
+						{
+							Name: "non-conflicting",
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateManagedRoles(cluster)).To(BeEmpty())
+	})
+
+	It("should produce an error on invalid connection limit", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Managed: &apiv1.ManagedConfiguration{
+					Roles: []apiv1.RoleConfiguration{
+						{
+							Name: "non-conflicting",
+							ConnectionLimit: -3,
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateManagedRoles(cluster)).To(HaveLen(1))
+	})
+
+	It("should produce an error if the role is reserved", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Managed: &apiv1.ManagedConfiguration{
+					Roles: []apiv1.RoleConfiguration{
+						{
+							Name: "postgres",
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateManagedRoles(cluster)).To(HaveLen(1))
+	})
+
+	It("should produce two errors if the role is reserved and the connection limit is invalid", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Managed: &apiv1.ManagedConfiguration{
+					Roles: []apiv1.RoleConfiguration{
+						{
+							Name: "postgres",
+							ConnectionLimit: -3,
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateManagedRoles(cluster)).To(HaveLen(2))
+	})
+
+	It("should produce an error if we define two roles with the same name", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Managed: &apiv1.ManagedConfiguration{
+					Roles: []apiv1.RoleConfiguration{
+						{
+							Name: "my_test",
+							ConnectionLimit: -1,
+						},
+						{
+							Name: "my_test",
+							Superuser: true,
+							BypassRLS: true,
+							ConnectionLimit: -1,
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateManagedRoles(cluster)).To(HaveLen(1))
+	})
+	It("should produce an error if we have a password secret AND DisablePassword in a role", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Managed: &apiv1.ManagedConfiguration{
+					Roles: []apiv1.RoleConfiguration{
+						{
+							Name: "my_test",
+							Superuser: true,
+							BypassRLS: true,
+							DisablePassword: true,
+							PasswordSecret: &apiv1.LocalObjectReference{
+								Name: "myPassword",
+							},
+							ConnectionLimit: -1,
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateManagedRoles(cluster)).To(HaveLen(1))
+	})
+})
+
+var _ = Describe("Managed Extensions validation", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	It("should succeed if no extension is enabled", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{},
+		}
+		Expect(v.validateManagedExtensions(cluster)).To(BeEmpty())
+	})
+
+	It("should fail if hot_standby_feedback is set to an invalid value", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+					HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
+						Enabled: ptr.To(true),
+					},
+				},
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Parameters: map[string]string{
+						"hot_standby_feedback": "foo",
+						"pg_failover_slots.synchronize_slot_names": "my_slot",
+					},
+				},
+			},
+		}
+		Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(1))
+	})
+
+	It("should succeed if pg_failover_slots and its prerequisites are enabled", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+					HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
+						Enabled: ptr.To(true),
+					},
+				},
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Parameters: map[string]string{
+						"hot_standby_feedback": "on",
+						"pg_failover_slots.synchronize_slot_names": "my_slot",
+					},
+				},
+			},
+		}
+		Expect(v.validatePgFailoverSlots(cluster)).To(BeEmpty())
+	})
+
+	It("should produce two errors if pg_failover_slots is enabled and its prerequisites are disabled", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Parameters: map[string]string{
+						"pg_failover_slots.synchronize_slot_names": "my_slot",
+					},
+				},
+			},
+		}
+		Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(2))
+	})
+
+	It("should produce an error if pg_failover_slots is enabled and HA slots are disabled", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Parameters: map[string]string{
+						"hot_standby_feedback": "yes",
+						"pg_failover_slots.synchronize_slot_names": "my_slot",
+					},
+				},
+			},
+		}
+		Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(1))
+	})
+
+	It("should produce an error if pg_failover_slots is enabled and hot_standby_feedback is disabled", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+					HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
+						Enabled: ptr.To(true),
+					},
+				},
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Parameters: map[string]string{
+						"pg_failover_slots.synchronize_slot_names": "my_slot",
+					},
+				},
+			},
+		}
+		Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(1))
+	})
+})
+
+var _ = Describe("Recovery from volume snapshot validation", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	clusterFromRecovery := func(recovery *apiv1.BootstrapRecovery) *apiv1.Cluster {
+		return &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Bootstrap: &apiv1.BootstrapConfiguration{
+					Recovery: recovery,
+				},
+				WalStorage: &apiv1.StorageConfiguration{},
+			},
+		}
+	}
+
+	It("should produce an error when defining two recovery sources at the same time", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Bootstrap: &apiv1.BootstrapConfiguration{
+					Recovery: &apiv1.BootstrapRecovery{
+						Source: "sourceName",
+						Backup: &apiv1.BackupSource{},
+						VolumeSnapshots: &apiv1.DataSource{},
+					},
+				},
+			},
+		}
+		Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1))
+	})
+
+	It("should produce an error when defining a backupID while recovering using a DataSource", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Bootstrap: &apiv1.BootstrapConfiguration{
+					Recovery: &apiv1.BootstrapRecovery{
+						RecoveryTarget: &apiv1.RecoveryTarget{
+							BackupID: "20220616T031500",
+						},
+						VolumeSnapshots: &apiv1.DataSource{
+							Storage: corev1.TypedLocalObjectReference{
+								APIGroup: ptr.To(""),
+								Kind: "PersistentVolumeClaim",
+								Name: "pgdata",
+							},
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1))
+	})
+
+	It("should produce an error when asking to recover WALs from a snapshot without having storage for it", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Bootstrap: &apiv1.BootstrapConfiguration{
+					Recovery: &apiv1.BootstrapRecovery{
+						VolumeSnapshots: &apiv1.DataSource{
+							Storage: corev1.TypedLocalObjectReference{
+								APIGroup: ptr.To(storagesnapshotv1.GroupName),
+								Kind: "VolumeSnapshot",
+								Name: "pgdata",
+							},
+							WalStorage: &corev1.TypedLocalObjectReference{
+								APIGroup: ptr.To(storagesnapshotv1.GroupName),
+								Kind: "VolumeSnapshot",
+								Name: "pgwal",
+							},
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1))
+	})
+
+	It("should not produce an error when the configuration is sound", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Bootstrap: &apiv1.BootstrapConfiguration{
+					Recovery: &apiv1.BootstrapRecovery{
+						VolumeSnapshots: &apiv1.DataSource{
+							Storage: corev1.TypedLocalObjectReference{
+								APIGroup: ptr.To(storagesnapshotv1.GroupName),
+								Kind: "VolumeSnapshot",
Name: "pgdata", + }, + WalStorage: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(storagesnapshotv1.GroupName), + Kind: "VolumeSnapshot", + Name: "pgwal", + }, + }, + }, + }, + WalStorage: &apiv1.StorageConfiguration{}, + }, + } + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) + }) + + It("accepts recovery from a VolumeSnapshot", func() { + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ + Storage: corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(storagesnapshotv1.GroupName), + Kind: apiv1.VolumeSnapshotKind, + Name: "pgdata", + }, + WalStorage: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(storagesnapshotv1.GroupName), + Kind: apiv1.VolumeSnapshotKind, + Name: "pgwal", + }, + }, + }) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) + }) + + It("accepts recovery from a VolumeSnapshot, while restoring WALs from an object store", func() { + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ + Storage: corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(storagesnapshotv1.GroupName), + Kind: apiv1.VolumeSnapshotKind, + Name: "pgdata", + }, + }, + + Source: "pg-cluster", + }) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) + }) + + When("using an nil apiGroup", func() { + It("accepts recovery from a PersistentVolumeClaim", func() { + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ + Storage: corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(storagesnapshotv1.GroupName), + Kind: "VolumeSnapshot", + Name: "pgdata", + }, + WalStorage: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(storagesnapshotv1.GroupName), + Kind: "VolumeSnapshot", + Name: "pgwal", + }, + }, + }) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) + }) + }) + + When("using an empty apiGroup", func() { + It("accepts recovery from a PersistentVolumeClaim", func() { + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ + Storage: corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(storagesnapshotv1.GroupName), + Kind: "VolumeSnapshot", + Name: "pgdata", + }, + WalStorage: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(storagesnapshotv1.GroupName), + Kind: "VolumeSnapshot", + Name: "pgwal", + }, + }, + }) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) + }) + }) + + It("prevent recovery from other Objects", func() { + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ + Storage: corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(""), + Kind: "Secret", + Name: "pgdata", + }, + WalStorage: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(""), + Kind: "ConfigMap", + Name: "pgwal", + }, + }, + }) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(2)) + }) +}) + +var _ = Describe("validateResources", func() { + var cluster *apiv1.Cluster + var v *ClusterCustomValidator + + BeforeEach(func() { + cluster = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{}, + }, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{}, + Limits: map[corev1.ResourceName]resource.Quantity{}, + }, + }, + } + v = &ClusterCustomValidator{} + }) + + It("returns an error when the CPU request is greater than CPU limit", func() { + 
+		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("2")
+		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
+
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
+	})
+
+	It("returns an error when the Memory request is greater than Memory limit", func() {
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
+		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
+
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Detail).To(Equal("Memory request is greater than the limit"))
+	})
+
+	It("returns no error when the ephemeral storage request is correctly set", func() {
+		cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("1")
+		cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
+
+		errors := v.validateResources(cluster)
+		Expect(errors).To(BeEmpty())
+	})
+
+	It("returns an error when the ephemeral storage request is greater than ephemeral storage limit", func() {
+		cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("2")
+		cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
+
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Detail).To(Equal("Ephemeral storage request is greater than the limit"))
+	})
+
+	It("returns three errors when CPU, Memory, and ephemeral storage requests are greater than limits", func() {
+		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("2")
+		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
+		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
+		cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("2")
+		cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
+
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(3))
+		Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
+		Expect(errors[1].Detail).To(Equal("Memory request is greater than the limit"))
+		Expect(errors[2].Detail).To(Equal("Ephemeral storage request is greater than the limit"))
+	})
+
+	It("returns two errors when both CPU and Memory requests are greater than their limits", func() {
+		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("2")
+		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
+		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
+
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(2))
+		Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
+		Expect(errors[1].Detail).To(Equal("Memory request is greater than the limit"))
+	})
+
+	It("returns no errors when both CPU and Memory requests are less than or equal to their limits", func() {
+		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("1")
+		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("2")
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
+		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("2Gi")
+
+		errors := v.validateResources(cluster)
+		Expect(errors).To(BeEmpty())
+	})
+
+	It("returns no errors when CPU request is set but limit is nil", func() {
+		cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("1")
+		errors := v.validateResources(cluster)
+		Expect(errors).To(BeEmpty())
+	})
+
+	It("returns no errors when CPU limit is set but request is nil", func() {
+		cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
+		errors := v.validateResources(cluster)
+		Expect(errors).To(BeEmpty())
+	})
+
+	It("returns no errors when Memory request is set but limit is nil", func() {
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
+		errors := v.validateResources(cluster)
+		Expect(errors).To(BeEmpty())
+	})
+
+	It("returns no errors when Memory limit is set but request is nil", func() {
+		cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
+		errors := v.validateResources(cluster)
+		Expect(errors).To(BeEmpty())
+	})
+
+	It("returns an error when memoryRequest is less than shared_buffers in kB", func() {
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
+		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000000kB"
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value"))
+	})
+
+	It("returns an error when memoryRequest is less than shared_buffers in MB", func() {
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1000Mi")
+		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000MB"
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value"))
+	})
+
+	It("returns no errors when no memoryRequest is set", func() {
+		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB"
+		errors := v.validateResources(cluster)
+		Expect(errors).To(BeEmpty())
+	})
+
+	It("returns no errors when memoryRequest is greater than or equal to shared_buffers in GB", func() {
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
+		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB"
+		errors := v.validateResources(cluster)
+		Expect(errors).To(BeEmpty())
+	})
+
+	It("returns an error when hugepages request is different than hugepages limits", func() {
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
+		cluster.Spec.Resources.Requests["hugepages-1Gi"] = resource.MustParse("1Gi")
+		cluster.Spec.Resources.Limits["hugepages-1Gi"] = resource.MustParse("2Gi")
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Detail).To(Equal("HugePages requests must equal the limits"))
+	})
+
+	It("returns an error when hugepages request is present but no CPU or memory are", func() {
+		cluster.Spec.Resources.Requests["hugepages-1Gi"] = resource.MustParse("1Gi")
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Detail).To(Equal("HugePages require cpu or memory"))
+	})
+
+	It("returns an error when no request is enough to contain shared_buffers, even if the sum is", func() {
+		cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
+		cluster.Spec.Resources.Requests["hugepages-1Gi"] = resource.MustParse("1Gi")
+		cluster.Spec.Resources.Requests["hugepages-2Mi"] = resource.MustParse("1Gi")
+		cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000000kB"
+		errors := v.validateResources(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value"))
+	})
+
It("returns no errors when hugepages-1Gi request is greater than or equal to shared_buffers in GB", func() { + cluster.Spec.Resources.Requests["memory"] = resource.MustParse("256Mi") + cluster.Spec.Resources.Requests["hugepages-1Gi"] = resource.MustParse("1Gi") + cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB" + errors := v.validateResources(cluster) + Expect(errors).To(BeEmpty()) + }) + + It("returns no errors when hugepages-2Mi request is greater than or equal to shared_buffers in GB", func() { + cluster.Spec.Resources.Requests["memory"] = resource.MustParse("256Mi") + cluster.Spec.Resources.Limits["hugepages-2Mi"] = resource.MustParse("1Gi") + cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB" + errors := v.validateResources(cluster) + Expect(errors).To(BeEmpty()) + }) + + It("returns no errors when shared_buffers is in a format that can't be parsed", func() { + cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi") + cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "invalid_value" + errors := v.validateResources(cluster) + Expect(errors).To(BeEmpty()) + }) +}) + +var _ = Describe("Tablespaces validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + createFakeTemporaryTbsConf := func(name string) apiv1.TablespaceConfiguration { + return apiv1.TablespaceConfiguration{ + Name: name, + Storage: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + } + } + + It("should succeed if there is no tablespaces section", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + }, + } + Expect(v.validate(cluster)).To(BeEmpty()) + }) + + It("should succeed if the tablespaces are ok", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + Tablespaces: []apiv1.TablespaceConfiguration{ + createFakeTemporaryTbsConf("my_tablespace"), + }, + }, + } + Expect(v.validate(cluster)).To(BeEmpty()) + }) + + It("should produce an error if the tablespace name is too long", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + Tablespaces: []apiv1.TablespaceConfiguration{ + // each repetition is 14 char long, so 5x14 = 70 char > postgres limit + createFakeTemporaryTbsConf("my_tablespace1my_tablespace2my_tablespace3my_tablespace4my_tablespace5"), + }, + }, + } + Expect(v.validate(cluster)).To(HaveLen(1)) + }) + + It("should produce an error if the tablespace name is reserved by Postgres", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + }, + Tablespaces: []apiv1.TablespaceConfiguration{ + createFakeTemporaryTbsConf("pg_foo"), + }, + }, + } + Expect(v.validate(cluster)).To(HaveLen(1)) + }) + + It("should produce an error if the tablespace name is not valid", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "10Gi", + 
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					createFakeTemporaryTbsConf("my-^&sdf;"),
+				},
+			},
+		}
+		Expect(v.validate(cluster)).To(HaveLen(1))
+	})
+
+	It("should produce an error if there are duplicate tablespaces", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					createFakeTemporaryTbsConf("my_tablespace"),
+					createFakeTemporaryTbsConf("my_TAblespace"),
+					createFakeTemporaryTbsConf("another"),
+				},
+			},
+		}
+		Expect(v.validate(cluster)).To(HaveLen(1))
+	})
+
+	It("should produce an error if the storage configured for the tablespace is invalid", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					{
+						Name: "my_tablespace1",
+						Storage: apiv1.StorageConfiguration{
+							Size: "10Gibberish",
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validate(cluster)).To(HaveLen(1))
+	})
+
+	It("should produce two errors if two tablespaces have errors", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					{
+						Name: "my_tablespace1",
+						Storage: apiv1.StorageConfiguration{
+							Size: "10Gibberish",
+						},
+					},
+					// each repetition is 14 char long, so 5x14 = 70 char > postgres limit
+					createFakeTemporaryTbsConf("my_tablespace1my_tablespace2my_tablespace3my_tablespace4my_tablespace5"),
+				},
+			},
+		}
+		Expect(v.validate(cluster)).To(HaveLen(2))
+	})
+
+	It("should produce an error if the tablespaces section is deleted", func() {
+		oldCluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					createFakeTemporaryTbsConf("my-tablespace1"),
+				},
+			},
+		}
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+			},
+		}
+		Expect(v.validateClusterChanges(cluster, oldCluster)).To(HaveLen(1))
+	})
+
+	It("should produce an error if a tablespace is deleted", func() {
+		oldCluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					createFakeTemporaryTbsConf("my-tablespace1"),
+					createFakeTemporaryTbsConf("my-tablespace2"),
+				},
+			},
+		}
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					createFakeTemporaryTbsConf("my-tablespace1"),
+				},
+			},
+		}
+		Expect(v.validateClusterChanges(cluster, oldCluster)).To(HaveLen(1))
+	})
+
+	It("should produce an error if a tablespace is reduced in size", func() {
+		oldCluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					createFakeTemporaryTbsConf("my-tablespace1"),
+				},
+			},
+		}
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					{
+						Name: "my-tablespace1",
+						Storage: apiv1.StorageConfiguration{
+							Size: "9Gi",
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateClusterChanges(cluster, oldCluster)).To(HaveLen(1))
+	})
+
+	It("should not complain when the backup section refers to a tbs that is defined", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					{
+						Name: "my-tablespace1",
+						Storage: apiv1.StorageConfiguration{
+							Size: "9Gi",
+						},
+					},
+				},
+				Backup: &apiv1.BackupConfiguration{
+					VolumeSnapshot: &apiv1.VolumeSnapshotConfiguration{
+						TablespaceClassName: map[string]string{
+							"my-tablespace1": "random",
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateTablespaceBackupSnapshot(cluster)).To(BeEmpty())
+	})
+
+	It("should complain when the backup section refers to a tbs that is not defined", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster1",
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "10Gi",
+				},
+				Tablespaces: []apiv1.TablespaceConfiguration{
+					{
+						Name: "my-tablespace1",
+						Storage: apiv1.StorageConfiguration{
+							Size: "9Gi",
+						},
+					},
+				},
+				Backup: &apiv1.BackupConfiguration{
+					VolumeSnapshot: &apiv1.VolumeSnapshotConfiguration{
+						TablespaceClassName: map[string]string{
+							"not-present": "random",
+						},
+					},
+				},
+			},
+		}
+		Expect(v.validateTablespaceBackupSnapshot(cluster)).To(HaveLen(1))
+	})
+})
+
+var _ = Describe("Validate hibernation", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	It("should succeed if hibernation is set to 'on'", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOn),
+				},
+			},
+		}
+		Expect(v.validateHibernationAnnotation(cluster)).To(BeEmpty())
+	})
+
+	It("should succeed if hibernation is set to 'off'", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOff),
+				},
+			},
+		}
+		Expect(v.validateHibernationAnnotation(cluster)).To(BeEmpty())
+	})
+
+	It("should fail if hibernation is set to an invalid value", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.HibernationAnnotationName: "",
+				},
+			},
+		}
+		Expect(v.validateHibernationAnnotation(cluster)).To(HaveLen(1))
+	})
+})
+
+var _ = Describe("validateManagedServices", func() {
+	var cluster *apiv1.Cluster
+	var v *ClusterCustomValidator
+
+	BeforeEach(func() {
+		cluster = &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test",
+			},
+			Spec: apiv1.ClusterSpec{
+				Managed: &apiv1.ManagedConfiguration{
+					Services: &apiv1.ManagedServices{
+						Additional: []apiv1.ManagedService{},
+					},
+				},
+			},
+		}
+		v = &ClusterCustomValidator{}
+	})
+
+	Context("when Managed or Services is nil", func() {
+		It("should return no errors", func() {
+			cluster.Spec.Managed = nil
+			Expect(v.validateManagedServices(cluster)).To(BeNil())
+
+			cluster.Spec.Managed = &apiv1.ManagedConfiguration{}
+			cluster.Spec.Managed.Services = nil
+			Expect(v.validateManagedServices(cluster)).To(BeNil())
+		})
+	})
+
+	Context("when there are no duplicate names", func() {
+		It("should return no errors", func() {
+			cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{
+				{
+					ServiceTemplate: apiv1.ServiceTemplateSpec{
+						ObjectMeta: apiv1.Metadata{Name: "service1"},
+					},
+				},
+				{
+					ServiceTemplate: apiv1.ServiceTemplateSpec{
+						ObjectMeta: apiv1.Metadata{Name: "service2"},
+					},
+				},
+			}
+			Expect(v.validateManagedServices(cluster)).To(BeNil())
+		})
+	})
+
+	Context("when there are duplicate names", func() {
+		It("should return an error", func() {
+			cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{
+				{
+					ServiceTemplate: apiv1.ServiceTemplateSpec{
+						ObjectMeta: apiv1.Metadata{Name: "service1"},
+					},
+				},
+				{
+					ServiceTemplate: apiv1.ServiceTemplateSpec{
+						ObjectMeta: apiv1.Metadata{Name: "service1"},
+					},
+				},
+			}
+			errs := v.validateManagedServices(cluster)
+			Expect(errs).To(HaveLen(1))
+			Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
+			Expect(errs[0].Field).To(Equal("spec.managed.services.additional"))
+			Expect(errs[0].Detail).To(ContainSubstring("contains services with the same .metadata.name"))
+		})
+	})
+
+	Context("when service template validation fails", func() {
+		It("should return an error", func() {
+			cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{
+				{
+					ServiceTemplate: apiv1.ServiceTemplateSpec{
+						ObjectMeta: apiv1.Metadata{Name: ""},
+					},
+				},
+			}
+			errs := v.validateManagedServices(cluster)
+			Expect(errs).To(HaveLen(1))
+			Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
+			Expect(errs[0].Field).To(Equal("spec.managed.services.additional[0]"))
+		})
+
+		It("should not allow reserved service names", func() {
+			assertError := func(name string, index int, err *field.Error) {
+				expectedDetail := fmt.Sprintf("the service name: '%s' is reserved for operator use", name)
+				Expect(err.Type).To(Equal(field.ErrorTypeInvalid))
+				Expect(err.Field).To(Equal(fmt.Sprintf("spec.managed.services.additional[%d]", index)))
+				Expect(err.Detail).To(Equal(expectedDetail))
+			}
+			cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{
+				{ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadWriteName()}}},
+				{ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadName()}}},
+				{ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadOnlyName()}}},
+				{ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceAnyName()}}},
+			}
+			errs := v.validateManagedServices(cluster)
+			Expect(errs).To(HaveLen(4))
+			assertError("test-rw", 0, errs[0])
+			assertError("test-r", 1, errs[1])
+			assertError("test-ro", 2, errs[2])
+			assertError("test-any", 3, errs[3])
+		})
+	})
+
+	Context("disabledDefault service validation", func() {
+		It("should allow the disablement of ro and r service", func() {
+			cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{
+				apiv1.ServiceSelectorTypeR,
+				apiv1.ServiceSelectorTypeRO,
+			}
+			errs := v.validateManagedServices(cluster)
+			Expect(errs).To(BeEmpty())
+		})
+
+		It("should not allow the disablement of rw service", func() {
+			cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{
+				apiv1.ServiceSelectorTypeRW,
+			}
+			errs := v.validateManagedServices(cluster)
+			Expect(errs).To(HaveLen(1))
+			Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
+			Expect(errs[0].Field).To(Equal("spec.managed.services.disabledDefaultServices"))
+		})
+	})
+})
+
+var _ = Describe("ServiceTemplate Validation", func() {
+	var (
+		path *field.Path
+		serviceSpecs apiv1.ServiceTemplateSpec
+	)
+
+	BeforeEach(func() {
+		path = field.NewPath("spec")
+	})
+
+	Describe("validateServiceTemplate", func() {
+		Context("when name is required", func() {
+			It("should return an error if the name is empty", func() {
+				serviceSpecs = apiv1.ServiceTemplateSpec{
+					ObjectMeta: apiv1.Metadata{Name: ""},
+				}
+
+				errs := validateServiceTemplate(path, true, serviceSpecs)
+				Expect(errs).To(HaveLen(1))
+				Expect(errs[0].Error()).To(ContainSubstring("name is required"))
+			})
+
+			It("should not return an error if the name is present", func() {
+				serviceSpecs = apiv1.ServiceTemplateSpec{
+					ObjectMeta: apiv1.Metadata{Name: "valid-name"},
+				}
+
+				errs := validateServiceTemplate(path, true, serviceSpecs)
+				Expect(errs).To(BeEmpty())
+			})
+		})
+
+		Context("when name is not allowed", func() {
+			It("should return an error if the name is present", func() {
+				serviceSpecs = apiv1.ServiceTemplateSpec{
+					ObjectMeta: apiv1.Metadata{Name: "invalid-name"},
+				}
+
+				errs := validateServiceTemplate(path, false, serviceSpecs)
+				Expect(errs).To(HaveLen(1))
+				Expect(errs[0].Error()).To(ContainSubstring("name is not allowed"))
+			})
+
+			It("should not return an error if the name is empty", func() {
+				serviceSpecs = apiv1.ServiceTemplateSpec{
+					ObjectMeta: apiv1.Metadata{Name: ""},
+				}
+
+				errs := validateServiceTemplate(path, false, serviceSpecs)
+				Expect(errs).To(BeEmpty())
+			})
+		})
+
+		Context("when selector is present", func() {
+			It("should return an error if the selector is present", func() {
+				serviceSpecs = apiv1.ServiceTemplateSpec{
+					ObjectMeta: apiv1.Metadata{Name: "valid-name"},
+					Spec: corev1.ServiceSpec{
+						Selector: map[string]string{"app": "test"},
+					},
+				}
+
+				errs := validateServiceTemplate(path, true, serviceSpecs)
+				Expect(errs).To(HaveLen(1))
+				Expect(errs[0].Error()).To(ContainSubstring("selector field is managed by the operator"))
+			})
+
+			It("should not return an error if the selector is absent", func() {
+				serviceSpecs = apiv1.ServiceTemplateSpec{
+					ObjectMeta: apiv1.Metadata{Name: "valid-name"},
+					Spec: corev1.ServiceSpec{
+						Selector: map[string]string{},
+					},
+				}
+
+				errs := validateServiceTemplate(path, true, serviceSpecs)
+				Expect(errs).To(BeEmpty())
+			})
+		})
+	})
+})
+
+var _ = Describe("validatePodPatchAnnotation", func() {
+	var v *ClusterCustomValidator
+
+	It("returns nil if the annotation is not present", func() {
+		cluster := &apiv1.Cluster{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}}
+		Expect(v.validatePodPatchAnnotation(cluster)).To(BeNil())
+	})
+
+	It("returns an error if the JSON patch cannot be decoded", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.PodPatchAnnotationName: "invalid-json-patch",
+				},
+			},
+		}
+
+		errors := v.validatePodPatchAnnotation(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Type).To(Equal(field.ErrorTypeInvalid))
+		Expect(errors[0].Field).To(Equal("metadata.annotations." + utils.PodPatchAnnotationName))
+		Expect(errors[0].Detail).To(ContainSubstring("error decoding JSON patch"))
+	})
+
+	It("returns an error if the JSON patch cannot be applied", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/spec/podInvalidSection", "value": "test"}]`,
+				},
+			},
+		}
+
+		errors := v.validatePodPatchAnnotation(cluster)
+		Expect(errors).To(HaveLen(1))
+		Expect(errors[0].Type).To(Equal(field.ErrorTypeInvalid))
+		Expect(errors[0].Field).To(Equal("metadata.annotations." + utils.PodPatchAnnotationName))
+		Expect(errors[0].Detail).To(ContainSubstring("jsonpatch doesn't apply cleanly to the pod"))
+	})
+
+	It("returns nil if the JSON patch is decoded successfully", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/metadata/name", "value": "test"}]`,
+				},
+			},
+		}
+
+		Expect(v.validatePodPatchAnnotation(cluster)).To(BeNil())
+	})
+})
+
+var _ = Describe("validatePluginConfiguration", func() {
+	var v *ClusterCustomValidator
+	var cluster *apiv1.Cluster
+	walPlugin1 := apiv1.PluginConfiguration{
+		Name: "walArchiverPlugin1",
+		Enabled: ptr.To(true),
+		IsWALArchiver: ptr.To(true),
+	}
+	walPlugin2 := apiv1.PluginConfiguration{
+		Name: "walArchiverPlugin2",
+		Enabled: ptr.To(true),
+		IsWALArchiver: ptr.To(true),
+	}
+
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+		cluster = &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Plugins: []apiv1.PluginConfiguration{},
+			},
+		}
+	})
+
+	It("returns no errors if no plugins are enabled", func() {
+		Expect(v.validatePluginConfiguration(cluster)).To(BeNil())
+	})
+
+	It("returns an error if a WAL archiver plugin is enabled when barmanObjectStore is configured", func() {
+		cluster.Spec.Backup = &apiv1.BackupConfiguration{
+			BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{},
+		}
+		cluster.Spec.Plugins = append(cluster.Spec.Plugins, walPlugin1)
+		errs := v.validatePluginConfiguration(cluster)
+		Expect(errs).To(HaveLen(1))
+		Expect(errs[0].Error()).To(ContainSubstring(
+			"Cannot enable a WAL archiver plugin when barmanObjectStore is configured"))
+	})
+
+	It("returns an error if more than one WAL archiver plugin is enabled", func() {
+		cluster.Spec.Plugins = append(cluster.Spec.Plugins, walPlugin1, walPlugin2)
+		errs := v.validatePluginConfiguration(cluster)
+		Expect(errs).To(HaveLen(1))
+		Expect(errs[0].Error()).To(ContainSubstring("Cannot enable more than one WAL archiver plugin"))
+	})
+
+	It("returns no errors when WAL archiver is enabled", func() {
+		cluster.Spec.Plugins = append(cluster.Spec.Plugins, walPlugin1)
+		Expect(v.validatePluginConfiguration(cluster)).To(BeNil())
+	})
+})
+
+var _ = Describe("liveness probe validation", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	It("returns no errors if the liveness pinger annotation is not present", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{},
+			},
+		}
+		Expect(v.validateLivenessPingerProbe(cluster)).To(BeNil())
+	})
+
+	It("returns no errors if the liveness pinger annotation is valid", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.LivenessPingerAnnotationName: `{"connectionTimeout": 1000, "requestTimeout": 5000, "enabled": true}`,
+				},
+			},
+		}
+		Expect(v.validateLivenessPingerProbe(cluster)).To(BeNil())
+	})
+
+	It("returns an error if the liveness pinger annotation is invalid", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.LivenessPingerAnnotationName: `{"requestTimeout": 5000}`,
+				},
+			},
+		}
+		errs := v.validateLivenessPingerProbe(cluster)
+		Expect(errs).To(HaveLen(1))
+		Expect(errs[0].Error()).To(ContainSubstring("error decoding liveness pinger config"))
+	})
+})
+
+var _ = Describe("validateExtensions", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	It("returns no error when extensions are not specified", func() {
+		cluster := &apiv1.Cluster{}
+		Expect(v.validateExtensions(cluster)).To(BeEmpty())
+	})
+
+	It("returns no error if the specified extensions are unique", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Extensions: []apiv1.ExtensionConfiguration{
+						{
+							Name: "extOne",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+						},
+						{
+							Name: "extTwo",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+						},
+					},
+				},
+			},
+		}
+
+		Expect(v.validateExtensions(cluster)).To(BeEmpty())
+	})
+
+	It("returns an error per duplicate extension name", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Extensions: []apiv1.ExtensionConfiguration{
+						{
+							Name: "extOne",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+						},
+						{
+							Name: "extTwo",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+						},
+						{
+							Name: "extTwo",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extTwo:1",
+							},
+						},
+						{
+							Name: "extOne",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne:1",
+							},
+						},
+					},
+				},
+			},
+		}
+
+		err := v.validateExtensions(cluster)
+		Expect(err).To(HaveLen(2))
+		Expect(err[0].BadValue).To(Equal("extTwo"))
+		Expect(err[1].BadValue).To(Equal("extOne"))
+	})
+
+	It("returns multiple errors for both invalid ExtensionControlPath and DynamicLibraryPath", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Extensions: []apiv1.ExtensionConfiguration{
+						{
+							Name: "extOne",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+							ExtensionControlPath: []string{
+								"/valid/path",
+								"",
+							},
+							DynamicLibraryPath: []string{
+								"",
+								"/valid/lib/path",
+							},
+						},
+					},
+				},
+			},
+		}
+
+		err := v.validateExtensions(cluster)
+		Expect(err).To(HaveLen(2))
+		Expect(err[0].Field).To(ContainSubstring("extensions[0].extension_control_path[1]"))
+		Expect(err[1].Field).To(ContainSubstring("extensions[0].dynamic_library_path[0]"))
+	})
+
+	It("returns no error when ExtensionControlPath and DynamicLibraryPath are valid", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Extensions: []apiv1.ExtensionConfiguration{
+						{
+							Name: "extOne",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+							ExtensionControlPath: []string{
+								"/usr/share/postgresql/extension",
+								"/opt/custom/extensions",
+							},
+							DynamicLibraryPath: []string{
+								"/usr/lib/postgresql/lib",
+								"/opt/custom/lib",
+							},
+						},
+					},
+				},
+			},
+		}
+
+		Expect(v.validateExtensions(cluster)).To(BeEmpty())
+	})
+
+	It("returns errors for duplicate ExtensionControlPath entries", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Extensions: []apiv1.ExtensionConfiguration{
+						{
+							Name: "extOne",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+							ExtensionControlPath: []string{
+								"/usr/share/postgresql/extension",
+								"/opt/custom/extensions",
+								"/usr/share/postgresql/extension", // duplicate
+							},
+						},
+					},
+				},
+			},
+		}
+
+		err := v.validateExtensions(cluster)
+		Expect(err).To(HaveLen(1))
+		Expect(err[0].Type).To(Equal(field.ErrorTypeDuplicate))
+		Expect(err[0].Field).To(ContainSubstring("extensions[0].extension_control_path[2]"))
+		Expect(err[0].BadValue).To(Equal("/usr/share/postgresql/extension"))
+	})
+
+	It("returns errors for duplicate DynamicLibraryPath entries", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Extensions: []apiv1.ExtensionConfiguration{
+						{
+							Name: "extOne",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+							DynamicLibraryPath: []string{
+								"/usr/lib/postgresql/lib",
+								"/opt/custom/lib",
+								"/usr/lib/postgresql/lib",
+							},
+						},
+					},
+				},
+			},
+		}
+
+		err := v.validateExtensions(cluster)
+		Expect(err).To(HaveLen(1))
+		Expect(err[0].Type).To(Equal(field.ErrorTypeDuplicate))
+		Expect(err[0].Field).To(ContainSubstring("extensions[0].dynamic_library_path[2]"))
+		Expect(err[0].BadValue).To(Equal("/usr/lib/postgresql/lib"))
+	})
+
+	It("returns errors for duplicates in both ExtensionControlPath and DynamicLibraryPath", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Extensions: []apiv1.ExtensionConfiguration{
+						{
+							Name: "extOne",
+							ImageVolumeSource: corev1.ImageVolumeSource{
+								Reference: "extOne",
+							},
+							ExtensionControlPath: []string{
+								"/usr/share/postgresql/extension",
+								"/usr/share/postgresql/extension",
+							},
+							DynamicLibraryPath: []string{
+								"/usr/lib/postgresql/lib",
+								"/usr/lib/postgresql/lib",
+							},
+						},
+					},
+				},
+			},
+		}
+
+		err := v.validateExtensions(cluster)
+		Expect(err).To(HaveLen(2))
+
+		Expect(err[0].Type).To(Equal(field.ErrorTypeDuplicate))
+		Expect(err[0].BadValue).To(Equal("/usr/share/postgresql/extension"))
+
+		Expect(err[1].Type).To(Equal(field.ErrorTypeDuplicate))
+		Expect(err[1].BadValue).To(Equal("/usr/lib/postgresql/lib"))
+	})
+})
+
+var _ = Describe("getInTreeBarmanWarnings", func() {
+	It("returns no warnings when BarmanObjectStore is not configured", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Backup: nil,
+				ExternalClusters: nil,
+			},
+		}
+		Expect(getInTreeBarmanWarnings(cluster)).To(BeEmpty())
+	})
+
+	It("returns a warning when BarmanObjectStore is configured in backup", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Backup: &apiv1.BackupConfiguration{
+					BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{},
+				},
+			},
+		}
+		warnings := getInTreeBarmanWarnings(cluster)
+		Expect(warnings).To(HaveLen(1))
+		Expect(warnings[0]).To(ContainSubstring("spec.backup.barmanObjectStore"))
+	})
+
+	It("returns warnings for multiple external clusters with BarmanObjectStore", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				ExternalClusters: []apiv1.ExternalCluster{
+					{BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}},
+					{BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}},
+				},
+			},
+		}
+		warnings := getInTreeBarmanWarnings(cluster)
+		Expect(warnings).To(HaveLen(1))
+		Expect(warnings[0]).To(ContainSubstring("spec.externalClusters.0.barmanObjectStore"))
+		Expect(warnings[0]).To(ContainSubstring("spec.externalClusters.1.barmanObjectStore"))
+	})
+
+	It("returns warnings for both backup and external clusters with BarmanObjectStore", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Backup: &apiv1.BackupConfiguration{
+					BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{},
+				},
+				ExternalClusters: []apiv1.ExternalCluster{
+					{BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}},
+				},
+			},
+		}
+		warnings := getInTreeBarmanWarnings(cluster)
+		Expect(warnings).To(HaveLen(1))
+		Expect(warnings[0]).To(ContainSubstring("spec.backup.barmanObjectStore"))
+		Expect(warnings[0]).To(ContainSubstring("spec.externalClusters.0.barmanObjectStore"))
+	})
+})
+
+var _ = Describe("getRetentionPolicyWarnings", func() {
+	It("returns no warnings if the retention policy is used with the in-tree backup support", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Backup: &apiv1.BackupConfiguration{
+					RetentionPolicy: "this retention policy",
+					BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{},
+				},
+			},
+		}
+
+		warnings := getRetentionPolicyWarnings(cluster)
+		Expect(warnings).To(BeEmpty())
+	})
+
+	It("returns a warning when retention policies are declared and not used", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Backup: &apiv1.BackupConfiguration{
+					RetentionPolicy: "this retention policy",
+				},
+			},
+		}
+
+		warnings := getRetentionPolicyWarnings(cluster)
+		Expect(warnings).To(HaveLen(1))
+	})
+})
+
+var _ = Describe("getStorageWarnings", func() {
+	It("returns no warnings when storage is properly configured", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "1Gi",
+				},
+			},
+		}
+		Expect(getStorageWarnings(cluster)).To(BeEmpty())
+	})
+
+	It("returns no warnings when PVC template has storage configured", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+						Resources: corev1.VolumeResourceRequirements{
+							Requests: corev1.ResourceList{
+								corev1.ResourceStorage: resource.MustParse("1Gi"),
+							},
+						},
+					},
+				},
+			},
+		}
+		Expect(getStorageWarnings(cluster)).To(BeEmpty())
+	})
+
+	It("returns a warning when both storageClass and storageClassName are specified", func() {
+		storageClass := "fast-ssd"
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					StorageClass: &storageClass,
+					PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+						StorageClassName: &storageClass,
+					},
+				},
+			},
+		}
+		warnings := getStorageWarnings(cluster)
+		Expect(warnings).To(HaveLen(1))
+		Expect(warnings[0]).To(ContainSubstring("spec.storage.storageClass"))
+		Expect(warnings[0]).To(ContainSubstring("spec.storage.pvcTemplate.storageClassName"))
+		Expect(warnings[0]).To(ContainSubstring("spec.storage.storageClass value will be used"))
+	})
+
+	It("returns a warning when both size and storage requests are specified", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "1Gi",
+					PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+						Resources: corev1.VolumeResourceRequirements{
+							Requests: corev1.ResourceList{
+								corev1.ResourceStorage: resource.MustParse("2Gi"),
+							},
+						},
+					},
+				},
+			},
+		}
+		warnings := getStorageWarnings(cluster)
+		Expect(warnings).To(HaveLen(1))
+		Expect(warnings[0]).To(ContainSubstring("spec.storage.size"))
+		Expect(warnings[0]).To(ContainSubstring("spec.storage.pvcTemplate.resources.requests.storage"))
+		Expect(warnings[0]).To(ContainSubstring("spec.storage.size value will be used"))
+	})
+
+	It("returns multiple warnings when both storage conflicts exist", func() {
+		storageClass := "fast-ssd"
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "1Gi",
+					StorageClass: &storageClass,
+					PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+						StorageClassName: &storageClass,
+						Resources: corev1.VolumeResourceRequirements{
+							Requests: corev1.ResourceList{
+								corev1.ResourceStorage: resource.MustParse("2Gi"),
+							},
+						},
+					},
+				},
+			},
+		}
+		warnings := getStorageWarnings(cluster)
+		Expect(warnings).To(HaveLen(2))
+		Expect(warnings[0]).To(ContainSubstring("storageClass"))
+		Expect(warnings[1]).To(ContainSubstring("size"))
+	})
+
+	It("returns warnings for WAL storage configuration conflicts", func() {
+		storageClass := "fast-ssd"
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				WalStorage: &apiv1.StorageConfiguration{
+					Size: "500Mi",
+					StorageClass: &storageClass,
+					PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+						StorageClassName: &storageClass,
+						Resources: corev1.VolumeResourceRequirements{
+							Requests: corev1.ResourceList{
+								corev1.ResourceStorage: resource.MustParse("1Gi"),
+							},
+						},
+					},
+				},
+			},
+		}
+		warnings := getStorageWarnings(cluster)
+		Expect(warnings).To(HaveLen(2))
+		Expect(warnings[0]).To(ContainSubstring("spec.walStorage.storageClass"))
+		Expect(warnings[1]).To(ContainSubstring("spec.walStorage.size"))
+	})
+
+	It("returns warnings for both storage and WAL storage conflicts", func() {
+		storageClass := "fast-ssd"
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "1Gi",
+					StorageClass: &storageClass,
+					PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+						StorageClassName: &storageClass,
+					},
+				},
+				WalStorage: &apiv1.StorageConfiguration{
+					Size: "500Mi",
+					PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+						Resources: corev1.VolumeResourceRequirements{
+							Requests: corev1.ResourceList{
+								corev1.ResourceStorage: resource.MustParse("1Gi"),
+							},
+						},
+					},
+				},
+			},
+		}
+		warnings := getStorageWarnings(cluster)
+		Expect(warnings).To(HaveLen(2))
+		Expect(warnings[0]).To(ContainSubstring("spec.storage"))
+		Expect(warnings[1]).To(ContainSubstring("spec.walStorage"))
+	})
+
+	It("returns no warnings when WAL storage is nil", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "1Gi",
+				},
+				WalStorage: nil,
+			},
+		}
+		Expect(getStorageWarnings(cluster)).To(BeEmpty())
+	})
+
+	It("returns no warnings when PVC template is nil", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "1Gi",
+					PersistentVolumeClaimTemplate: nil,
+				},
+			},
+		}
+		Expect(getStorageWarnings(cluster)).To(BeEmpty())
+	})
+
+	It("returns no warnings when storage requests are zero", func() {
+		cluster := &apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				StorageConfiguration: apiv1.StorageConfiguration{
+					Size: "1Gi",
+					PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
+						Resources: corev1.VolumeResourceRequirements{
+							Requests: corev1.ResourceList{},
+						},
+					},
+				},
+			},
+		}
+		Expect(getStorageWarnings(cluster)).To(BeEmpty())
+	})
+})
+
+var _ = Describe("failoverQuorum validation", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	It("fails if it is active but no synchronous replication is configured", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.FailoverQuorumAnnotationName: "t",
+				},
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+			},
+		}
+
+		errList := v.validateFailoverQuorum(cluster)
+		Expect(errList).To(HaveLen(1))
+	})
+
+	It("requires at least three instances", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.FailoverQuorumAnnotationName: "t",
+				},
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Synchronous: &apiv1.SynchronousReplicaConfiguration{
+						Number: 1,
+					},
+				},
+			},
+		}
+
+		errList := v.validateFailoverQuorum(cluster)
+		Expect(errList).To(BeEmpty())
+
+		cluster.Spec.Instances = 2
+		errList = v.validateFailoverQuorum(cluster)
+		Expect(errList).To(HaveLen(1))
+	})
+
+	It("checks that the number of external synchronous replicas is coherent", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.FailoverQuorumAnnotationName: "t",
+				},
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+				PostgresConfiguration: apiv1.PostgresConfiguration{
+					Synchronous: &apiv1.SynchronousReplicaConfiguration{
+						Number: 1,
+						StandbyNamesPre: []string{
+							"one",
+							"two",
+						},
+						StandbyNamesPost: []string{
+							"three",
+							"four",
+						},
+					},
+				},
+			},
+		}
+
+		errList := v.validateFailoverQuorum(cluster)
+		Expect(errList).To(HaveLen(1))
+	})
+})
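The failoverQuorum tests above encode three preconditions: synchronous replication must be configured, the cluster needs at least three instances, and the external synchronous standby list must stay coherent with the requested quorum. A minimal, self-contained Go sketch of those rules follows; it is an illustration distilled from the expectations above, with hypothetical names and messages, not the operator's actual validateFailoverQuorum:

package main

import "fmt"

// failoverQuorumErrors restates the rules the tests above assert:
// synchronous replication must be configured, the cluster needs at
// least three instances, and the external standbys listed in
// standbyNamesPre/Post must not be able to satisfy the quorum alone.
func failoverQuorumErrors(instances, number, externalStandbys int, syncConfigured bool) []string {
	var errs []string
	if !syncConfigured {
		return append(errs, "failoverQuorum requires synchronous replication to be configured")
	}
	if instances < 3 {
		errs = append(errs, "failoverQuorum requires at least three instances")
	}
	if externalStandbys >= number {
		// a quorum of `number` could be formed entirely from external
		// standbys, which the operator cannot promote: incoherent
		errs = append(errs, "external synchronous standbys are not coherent with the requested quorum")
	}
	return errs
}

func main() {
	fmt.Println(failoverQuorumErrors(3, 1, 0, true)) // no errors
	fmt.Println(failoverQuorumErrors(2, 1, 0, true)) // instance-count error
	fmt.Println(failoverQuorumErrors(3, 1, 4, true)) // coherence error
}

Under these assumptions, the three test scenarios above map to an empty error list, an instance-count error, and a coherence error respectively.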
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +const ( + // validationEnabledAnnotationValue is the value of the "validation" + // annotation that is set when validation is enabled + validationEnabledAnnotationValue = "enabled" + + // validationDisabledAnnotationValue is the value of the "validation" + // annotation that is set when validation is disabled + validationDisabledAnnotationValue = "disabled" +) + +// isValidationEnabled checks whether validation webhooks are +// enabled or disabled +func isValidationEnabled(obj client.Object) (bool, error) { + value := obj.GetAnnotations()[utils.WebhookValidationAnnotationName] + switch value { + case validationEnabledAnnotationValue, "": + return true, nil + + case validationDisabledAnnotationValue: + return false, nil + + default: + return true, fmt.Errorf( + `invalid %q annotation: %q (expected "enabled" or "disabled")`, + utils.WebhookValidationAnnotationName, value) + } +} + +// bypassableValidator wraps an existing custom validator so that it can +// be enabled or disabled via an annotation. +type bypassableValidator struct { + validator admission.CustomValidator +} + +// newBypassableValidator creates a custom validator that wraps an existing +// one, allowing it to be enabled or disabled via an annotation. +func newBypassableValidator(validator admission.CustomValidator) *bypassableValidator { + return &bypassableValidator{ + validator: validator, + } +} + +// ValidateCreate validates the object on creation. +// The optional warnings will be added to the response as warning messages. +// Return an error if the object is invalid. +func (b bypassableValidator) ValidateCreate( + ctx context.Context, + obj runtime.Object, +) (admission.Warnings, error) { + return validate(obj, func() (admission.Warnings, error) { + return b.validator.ValidateCreate(ctx, obj) + }) +} + +// ValidateUpdate validates the object on update. +// The optional warnings will be added to the response as warning messages. +// Return an error if the object is invalid. +func (b bypassableValidator) ValidateUpdate( + ctx context.Context, + oldObj runtime.Object, + newObj runtime.Object, +) (admission.Warnings, error) { + return validate(newObj, func() (admission.Warnings, error) { + return b.validator.ValidateUpdate(ctx, oldObj, newObj) + }) +} + +// ValidateDelete validates the object on deletion. +// The optional warnings will be added to the response as warning messages. +// Return an error if the object is invalid. +func (b bypassableValidator) ValidateDelete( + ctx context.Context, + obj runtime.Object, +) (admission.Warnings, error) { + return validate(obj, func() (admission.Warnings, error) { + return b.validator.ValidateDelete(ctx, obj) + }) +} + +const validationDisabledWarning = "validation webhook is disabled — all changes are accepted without validation. " + + "This may lead to unsafe or destructive operations. Proceed with extreme caution."
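For orientation, here is a minimal sketch of how this wrapper is meant to be exercised. It is illustrative only, not part of the patch: it reuses the `fakeCustomValidator` test double defined in `common_test.go` below, together with the annotation key exported by `pkg/utils`, exactly as they appear in this changeset.

```go
// Hypothetical snippet: a validator that always rejects is wrapped by
// newBypassableValidator and then bypassed through the annotation.
rejecting := &fakeCustomValidator{createError: errors.New("always rejected")}
wrapped := newBypassableValidator(rejecting)

cluster := &apiv1.Cluster{
	ObjectMeta: metav1.ObjectMeta{
		Annotations: map[string]string{
			// "disabled" short-circuits validation; the admission response
			// then carries validationDisabledWarning instead of an error.
			utils.WebhookValidationAnnotationName: validationDisabledAnnotationValue,
		},
	},
}

warnings, err := wrapped.ValidateCreate(context.Background(), cluster)
// err is nil: the inner validator is never invoked.
// warnings contains the single validationDisabledWarning message.
_, _ = warnings, err
```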
+ +func validate(obj runtime.Object, validator func() (admission.Warnings, error)) (admission.Warnings, error) { + var warnings admission.Warnings + + validationEnabled, err := isValidationEnabled(obj.(client.Object)) + if err != nil { + // If the validation annotation value is unexpected, we continue validating + // the object but we warn the user that the value was wrong + warnings = append(warnings, err.Error()) + } + + if !validationEnabled { + warnings = append(warnings, validationDisabledWarning) + return warnings, nil + } + + validationWarnings, err := validator() + warnings = append(warnings, validationWarnings...) + return warnings, err +} diff --git a/internal/webhook/v1/common_test.go b/internal/webhook/v1/common_test.go new file mode 100644 index 0000000000..126877e215 --- /dev/null +++ b/internal/webhook/v1/common_test.go @@ -0,0 +1,172 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func newClusterWithValidationAnnotation(value string) *apiv1.Cluster { + return &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.WebhookValidationAnnotationName: value, + }, + }, + } +} + +var _ = Describe("Validation webhook validation parser", func() { + It("ensures that with no annotations the validation checking is enabled", func() { + cluster := &apiv1.Cluster{} + Expect(isValidationEnabled(cluster)).To(BeTrue()) + }) + + It("ensures that with validation can be explicitly enabled", func() { + cluster := newClusterWithValidationAnnotation(validationEnabledAnnotationValue) + Expect(isValidationEnabled(cluster)).To(BeTrue()) + }) + + It("ensures that with validation can be explicitly disabled", func() { + cluster := newClusterWithValidationAnnotation(validationDisabledAnnotationValue) + Expect(isValidationEnabled(cluster)).To(BeFalse()) + }) + + It("ensures that with validation is enabled when the annotation value is unknown", func() { + cluster := newClusterWithValidationAnnotation("idontknow") + status, err := isValidationEnabled(cluster) + Expect(err).To(HaveOccurred()) + Expect(status).To(BeTrue()) + }) +}) + +type fakeCustomValidator struct { + calls []string + + createWarnings admission.Warnings + createError error + + updateWarnings admission.Warnings + updateError error + + deleteWarnings admission.Warnings + deleteError error +} + +func (f *fakeCustomValidator) ValidateCreate( + _ context.Context, + _ runtime.Object, +) (admission.Warnings, error) { + f.calls = append(f.calls, "create") + return f.createWarnings, f.createError +} + +func (f *fakeCustomValidator) ValidateUpdate( + _ context.Context, + _ runtime.Object, + _ runtime.Object, +) (admission.Warnings, error) { + f.calls = append(f.calls, "update") + return f.updateWarnings, f.updateError +} + +func (f *fakeCustomValidator) ValidateDelete( + _ context.Context, + _ runtime.Object, +) (admission.Warnings, error) { + f.calls = append(f.calls, "delete") + return f.deleteWarnings, f.deleteError +} + +var _ = Describe("Bypassable validator", func() { + fakeCreateError := fmt.Errorf("fake error") + fakeUpdateError := fmt.Errorf("fake error") + fakeDeleteError := fmt.Errorf("fake error") + + disabledCluster := newClusterWithValidationAnnotation(validationDisabledAnnotationValue) + enabledCluster := newClusterWithValidationAnnotation(validationEnabledAnnotationValue) + wrongCluster := newClusterWithValidationAnnotation("dontknow") + + fakeErrorValidator := &fakeCustomValidator{ + createError: fakeCreateError, + deleteError: fakeDeleteError, + updateError: fakeUpdateError, + } + + DescribeTable( + "validator callbacks", + func(ctx SpecContext, c *apiv1.Cluster, expectedError, withWarnings bool) { + b := newBypassableValidator(fakeErrorValidator) + + By("creation entrypoint", func() { + result, err := b.ValidateCreate(ctx, c) + if expectedError { + Expect(err).To(Equal(fakeCreateError)) + } else { + Expect(err).ToNot(HaveOccurred()) + } + + if withWarnings { + Expect(result).To(HaveLen(1)) + } + }) + + By("update entrypoint", func() { + result, err := b.ValidateUpdate(ctx, enabledCluster, c) + if expectedError { + Expect(err).To(Equal(fakeUpdateError)) + } else { + Expect(err).ToNot(HaveOccurred()) + } + + if withWarnings { + Expect(result).To(HaveLen(1)) + } + }) + + By("delete entrypoint", func() { + result, err := b.ValidateDelete(ctx, c) + if expectedError { + Expect(err).To(Equal(fakeDeleteError)) + } else { + Expect(err).ToNot(HaveOccurred()) + } + + if 
withWarnings { + Expect(result).To(HaveLen(1)) + } + }) + }, + Entry("validation is disabled", disabledCluster, false, true), + Entry("validation is enabled", enabledCluster, true, false), + Entry("validation value is not expected", wrongCluster, true, true), + ) +}) diff --git a/internal/webhook/v1/database_webhook.go b/internal/webhook/v1/database_webhook.go new file mode 100644 index 0000000000..f4250dc61f --- /dev/null +++ b/internal/webhook/v1/database_webhook.go @@ -0,0 +1,273 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// databaseLog is for logging in this package. +var databaseLog = log.WithName("database-resource").WithValues("version", "v1") + +// SetupDatabaseWebhookWithManager registers the webhook for Database in the manager. +func SetupDatabaseWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Database{}). + WithValidator(newBypassableValidator(&DatabaseCustomValidator{})). + WithDefaulter(&DatabaseCustomDefaulter{}). + Complete() +} + +// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. +// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. +// +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-database,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=databases,verbs=create;update,versions=v1,name=mdatabase.cnpg.io,sideEffects=None +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-database,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=databases,versions=v1,name=vdatabase.cnpg.io,sideEffects=None + +// DatabaseCustomDefaulter struct is responsible for setting default values on the custom resource of the +// Kind Database when those are created or updated. +type DatabaseCustomDefaulter struct{} + +// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Database. 
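+// The body below is currently a stub: it only logs the incoming request, +// since Database resources need no server-side defaulting yet.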
+func (d *DatabaseCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { + database, ok := obj.(*apiv1.Database) + if !ok { + return fmt.Errorf("expected a Database object but got %T", obj) + } + databaseLog.Info("Defaulting for database", "name", database.GetName(), "namespace", database.GetNamespace()) + + // TODO(user): fill in your defaulting logic. + + return nil +} + +// DatabaseCustomValidator is responsible for validating the Database +// resource when it is created, updated, or deleted. +type DatabaseCustomValidator struct{} + +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Database. +func (v *DatabaseCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + database, ok := obj.(*apiv1.Database) + if !ok { + return nil, fmt.Errorf("expected a Database object but got %T", obj) + } + databaseLog.Info( + "Validation for Database upon creation", + "name", database.GetName(), "namespace", database.GetNamespace()) + + allErrs := v.validate(database) + allWarnings := v.getAdmissionWarnings(database) + + if len(allErrs) == 0 { + return allWarnings, nil + } + + return allWarnings, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Database"}, + database.Name, allErrs) +} + +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Database. +func (v *DatabaseCustomValidator) ValidateUpdate( + _ context.Context, + oldObj, newObj runtime.Object, +) (admission.Warnings, error) { + database, ok := newObj.(*apiv1.Database) + if !ok { + return nil, fmt.Errorf("expected a Database object for the newObj but got %T", newObj) + } + + oldDatabase, ok := oldObj.(*apiv1.Database) + if !ok { + return nil, fmt.Errorf("expected a Database object for the oldObj but got %T", oldObj) + } + + databaseLog.Info( + "Validation for Database upon update", + "name", database.GetName(), "namespace", database.GetNamespace()) + + allErrs := append( + v.validate(database), + v.validateDatabaseChanges(database, oldDatabase)..., + ) + allWarnings := v.getAdmissionWarnings(database) + + if len(allErrs) == 0 { + return allWarnings, nil + } + + return allWarnings, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Database"}, + database.Name, allErrs) +} + +func (v *DatabaseCustomValidator) validateDatabaseChanges(_ *apiv1.Database, _ *apiv1.Database) field.ErrorList { + return nil +} + +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Database. +func (v *DatabaseCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + database, ok := obj.(*apiv1.Database) + if !ok { + return nil, fmt.Errorf("expected a Database object but got %T", obj) + } + databaseLog.Info( + "Validation for Database upon deletion", + "name", database.GetName(), "namespace", database.GetNamespace()) + + // TODO(user): fill in your validation logic upon object deletion. + + return nil, nil +} + +// validate groups the validation logic for databases, returning a list of all encountered errors +func (v *DatabaseCustomValidator) validate(d *apiv1.Database) (allErrs field.ErrorList) { + type validationFunc func(*apiv1.Database) field.ErrorList + validations := []validationFunc{ + v.validateExtensions, + v.validateSchemas, + v.validateFDWs, + } + + for _, validate := range validations { + allErrs = append(allErrs, validate(d)...)
+ } + + return allErrs +} + +func (v *DatabaseCustomValidator) getAdmissionWarnings(_ *apiv1.Database) admission.Warnings { + return nil +} + +// validateExtensions validates the database extensions +func (v *DatabaseCustomValidator) validateExtensions(d *apiv1.Database) field.ErrorList { + var result field.ErrorList + + extensionNames := stringset.New() + for i, ext := range d.Spec.Extensions { + name := ext.Name + if extensionNames.Has(name) { + result = append( + result, + field.Duplicate( + field.NewPath("spec", "extensions").Index(i).Child("name"), + name, + ), + ) + } + + extensionNames.Put(name) + } + + return result +} + +// validateSchemas validates the database schemas +func (v *DatabaseCustomValidator) validateSchemas(d *apiv1.Database) field.ErrorList { + var result field.ErrorList + + schemaNames := stringset.New() + for i, schema := range d.Spec.Schemas { + name := schema.Name + if schemaNames.Has(name) { + result = append( + result, + field.Duplicate( + field.NewPath("spec", "schemas").Index(i).Child("name"), + name, + ), + ) + } + + schemaNames.Put(name) + } + + return result +} + +// validateFDWs validates the database Foreign Data Wrappers +// FDWs must be unique in .spec.fdws +func (v *DatabaseCustomValidator) validateFDWs(d *apiv1.Database) field.ErrorList { + var result field.ErrorList + + fdwNames := stringset.New() + for i, fdw := range d.Spec.FDWs { + name := fdw.Name + if fdwNames.Has(name) { + result = append( + result, + field.Duplicate( + field.NewPath("spec", "fdws").Index(i).Child("name"), + name, + ), + ) + } + + // Validate the options of the FDW + optionNames := stringset.New() + for k, option := range fdw.Options { + optionName := option.Name + if optionNames.Has(optionName) { + result = append( + result, + field.Duplicate( + field.NewPath("spec", "fdws").Index(i).Child("options").Index(k).Child("name"), + optionName, + ), + ) + } + + optionNames.Put(optionName) + } + + // Validate the usage of the FDW + usageNames := stringset.New() + for j, usage := range fdw.Usages { + usageName := usage.Name + if usageNames.Has(usageName) { + result = append( + result, + field.Duplicate( + field.NewPath("spec", "fdws").Index(i).Child("usages").Index(j).Child("name"), + usageName, + ), + ) + } + + usageNames.Put(usageName) + } + + fdwNames.Put(name) + } + + return result +} diff --git a/internal/webhook/v1/database_webhook_test.go b/internal/webhook/v1/database_webhook_test.go new file mode 100644 index 0000000000..cebb1d24e2 --- /dev/null +++ b/internal/webhook/v1/database_webhook_test.go @@ -0,0 +1,222 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Database validation", func() { + var v *DatabaseCustomValidator + + createExtensionSpec := func(name string) apiv1.ExtensionSpec { + return apiv1.ExtensionSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: name, + Ensure: apiv1.EnsurePresent, + }, + } + } + createSchemaSpec := func(name string) apiv1.SchemaSpec { + return apiv1.SchemaSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: name, + Ensure: apiv1.EnsurePresent, + }, + } + } + + createFDWSpec := func(name string) apiv1.FDWSpec { + return apiv1.FDWSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: name, + Ensure: apiv1.EnsurePresent, + }, + } + } + BeforeEach(func() { + v = &DatabaseCustomValidator{} + }) + + DescribeTable( + "Database validation", + func(db *apiv1.Database, errorCount int) { + foundErrors := v.validate(db) + Expect(foundErrors).To(HaveLen(errorCount)) + }, + Entry( + "doesn't complain when extensions and schemas are null", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{}, + }, + 0, + ), + Entry( + "doesn't complain if there are no duplicate extensions and no duplicate schemas", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + Extensions: []apiv1.ExtensionSpec{ + createExtensionSpec("postgis"), + }, + Schemas: []apiv1.SchemaSpec{ + createSchemaSpec("test_schema"), + }, + }, + }, + 0, + ), + Entry( + "complain if there are duplicate extensions", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + Extensions: []apiv1.ExtensionSpec{ + createExtensionSpec("postgis"), + createExtensionSpec("postgis"), + createExtensionSpec("cube"), + }, + }, + }, + 1, + ), + + Entry( + "complain if there are duplicate schemas", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + Schemas: []apiv1.SchemaSpec{ + createSchemaSpec("test_one"), + createSchemaSpec("test_two"), + createSchemaSpec("test_two"), + }, + }, + }, + 1, + ), + + Entry( + "doesn't complain with distinct FDWs and usage names", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + FDWs: []apiv1.FDWSpec{ + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "fdw1", + Ensure: apiv1.EnsurePresent, + }, + Usages: []apiv1.UsageSpec{ + {Name: "usage1"}, + {Name: "usage2"}, + }, + Options: []apiv1.OptionSpec{ + {Name: "option1"}, + {Name: "option2"}, + }, + }, + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "fdw2", + Ensure: apiv1.EnsurePresent, + }, + Usages: []apiv1.UsageSpec{ + {Name: "usage3"}, + {Name: "usage4"}, + }, + Options: []apiv1.OptionSpec{ + {Name: "option3"}, + {Name: "option4"}, + }, + }, + }, + }, + }, + 0, + ), + + Entry( + "complain if there are duplicate FDWs", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + FDWs: []apiv1.FDWSpec{ + createFDWSpec("postgres_fdw"), + createFDWSpec("mysql_fdw"), + createFDWSpec("postgres_fdw"), + }, + }, + }, + 1, + ), + + Entry( + "complain if there are duplicate usage names within an FDW", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + FDWs: []apiv1.FDWSpec{ + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "postgre_fdw", + Ensure: apiv1.EnsurePresent, + }, + Usages: []apiv1.UsageSpec{ + {Name: "usage1"}, + {Name: "usage2"}, + {Name: "usage1"}, + }, + }, + }, + }, + }, + 1, + ), + + Entry( + "complains for duplicate FDW and duplicate usage names", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + FDWs: []apiv1.FDWSpec{ + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "duplicate_fdw", + Ensure: apiv1.EnsurePresent, + }, + Usages: []apiv1.UsageSpec{ + {Name: "dup_usage"}, + {Name: "dup_usage"}, + }, + Options: 
[]apiv1.OptionSpec{ + {Name: "dup_option"}, + {Name: "dup_option"}, + }, + }, + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "duplicate_fdw", + Ensure: apiv1.EnsurePresent, + }, + }, + }, + }, + }, + 3, + ), + ) +}) diff --git a/internal/webhook/v1/doc.go b/internal/webhook/v1/doc.go new file mode 100644 index 0000000000..2f62eb58a8 --- /dev/null +++ b/internal/webhook/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package v1 contains the webhooks for the postgresql v1 API group +package v1 diff --git a/internal/webhook/v1/pooler_webhook.go b/internal/webhook/v1/pooler_webhook.go new file mode 100644 index 0000000000..b8c1070c99 --- /dev/null +++ b/internal/webhook/v1/pooler_webhook.go @@ -0,0 +1,257 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// AllowedPgbouncerGenericConfigurationParameters is the list of allowed parameters for PgBouncer +var AllowedPgbouncerGenericConfigurationParameters = stringset.From([]string{ + "application_name_add_host", + "autodb_idle_timeout", + "cancel_wait_timeout", + "client_idle_timeout", + "client_login_timeout", + "default_pool_size", + "disable_pqexec", + "dns_max_ttl", + "dns_nxdomain_ttl", + "idle_transaction_timeout", + "ignore_startup_parameters", + "listen_backlog", + "log_connections", + "log_disconnections", + "log_pooler_errors", + "log_stats", + "max_client_conn", + "max_db_connections", + "max_packet_size", + "max_prepared_statements", + "max_user_connections", + "min_pool_size", + "pkt_buf", + "query_timeout", + "query_wait_timeout", + "reserve_pool_size", + "reserve_pool_timeout", + "sbuf_loopcnt", + "server_check_delay", + "server_check_query", + "server_connect_timeout", + "server_fast_close", + "server_idle_timeout", + "server_lifetime", + "server_login_retry", + "server_reset_query", + "server_reset_query_always", + "server_round_robin", + "server_tls_ciphers", + "server_tls_protocols", + "stats_period", + "suspend_timeout", + "tcp_defer_accept", + "tcp_socket_buffer", + "tcp_keepalive", + "tcp_keepcnt", + "tcp_keepidle", + "tcp_keepintvl", + "tcp_user_timeout", + "track_extra_parameters", + "verbose", +}) + +// poolerLog is for logging in this package. +var poolerLog = log.WithName("pooler-resource").WithValues("version", "v1") + +// SetupPoolerWebhookWithManager registers the webhook for Pooler in the manager. +func SetupPoolerWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Pooler{}). + WithValidator(newBypassableValidator(&PoolerCustomValidator{})). + Complete() +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. +// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-pooler,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=poolers,versions=v1,name=vpooler.cnpg.io,sideEffects=None + +// PoolerCustomValidator struct is responsible for validating the Pooler resource +// when it is created, updated, or deleted. +type PoolerCustomValidator struct{} + +var _ webhook.CustomValidator = &PoolerCustomValidator{} + +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Pooler. 
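+// It also returns a warning when the Pooler opts out of the automated +// integration with its Cluster, since manual configuration is then required.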
+func (v *PoolerCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + pooler, ok := obj.(*apiv1.Pooler) + if !ok { + return nil, fmt.Errorf("expected a Pooler object but got %T", obj) + } + poolerLog.Info("Validation for Pooler upon creation", "name", pooler.GetName(), "namespace", pooler.GetNamespace()) + + var warns admission.Warnings + if !pooler.IsAutomatedIntegration() { + poolerLog.Info("Pooler not automatically configured, manual configuration required", + "name", pooler.Name, "namespace", pooler.Namespace, "cluster", pooler.Spec.Cluster.Name) + warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+ + "Manually configure it as described in the docs.", pooler.Name, pooler.Spec.Cluster.Name, pooler.Namespace)) + } + + allErrs := v.validate(pooler) + + if len(allErrs) == 0 { + return warns, nil + } + + return warns, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"}, + pooler.Name, allErrs) +} + +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Pooler. +func (v *PoolerCustomValidator) ValidateUpdate( + _ context.Context, + oldObj, newObj runtime.Object, +) (admission.Warnings, error) { + pooler, ok := newObj.(*apiv1.Pooler) + if !ok { + return nil, fmt.Errorf("expected a Pooler object for the newObj but got %T", newObj) + } + + oldPooler, ok := oldObj.(*apiv1.Pooler) + if !ok { + return nil, fmt.Errorf("expected a Pooler object for the oldObj but got %T", oldObj) + } + + poolerLog.Info("Validation for Pooler upon update", "name", pooler.GetName(), "namespace", pooler.GetNamespace()) + + var warns admission.Warnings + if oldPooler.IsAutomatedIntegration() && !pooler.IsAutomatedIntegration() { + poolerLog.Info("Pooler not automatically configured, manual configuration required", + "name", pooler.Name, "namespace", pooler.Namespace, "cluster", pooler.Spec.Cluster.Name) + warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+ + "Manually configure it as described in the docs.", pooler.Name, pooler.Spec.Cluster.Name, pooler.Namespace)) + } + + allErrs := v.validate(pooler) + if len(allErrs) == 0 { + return warns, nil + } + + return warns, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"}, + pooler.Name, allErrs) +} + +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Pooler. +func (v *PoolerCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + pooler, ok := obj.(*apiv1.Pooler) + if !ok { + return nil, fmt.Errorf("expected a Pooler object but got %T", obj) + } + poolerLog.Info("Validation for Pooler upon deletion", "name", pooler.GetName(), "namespace", pooler.GetNamespace()) + + // TODO(user): fill in your validation logic upon object deletion.
+ + return nil, nil +} + +func (v *PoolerCustomValidator) validatePgBouncer(r *apiv1.Pooler) field.ErrorList { + var result field.ErrorList + switch { + case r.Spec.PgBouncer == nil: + result = append(result, + field.Invalid( + field.NewPath("spec", "pgbouncer"), + "", "required pgbouncer configuration")) + case r.Spec.PgBouncer.AuthQuerySecret != nil && r.Spec.PgBouncer.AuthQuerySecret.Name != "" && + r.Spec.PgBouncer.AuthQuery == "": + result = append(result, + field.Invalid( + field.NewPath("spec", "pgbouncer", "authQuery"), + "", "must specify an auth query when providing an auth query secret")) + case (r.Spec.PgBouncer.AuthQuerySecret == nil || r.Spec.PgBouncer.AuthQuerySecret.Name == "") && + r.Spec.PgBouncer.AuthQuery != "": + result = append(result, + field.Invalid( + field.NewPath("spec", "pgbouncer", "authQuerySecret", "name"), + "", "must specify an existing auth query secret when providing an auth query")) + } + + if r.Spec.PgBouncer != nil && len(r.Spec.PgBouncer.Parameters) > 0 { + result = append(result, v.validatePgbouncerGenericParameters(r)...) + } + + return result +} + +func (v *PoolerCustomValidator) validateCluster(r *apiv1.Pooler) field.ErrorList { + var result field.ErrorList + if r.Spec.Cluster.Name == "" { + result = append(result, + field.Invalid( + field.NewPath("spec", "cluster", "name"), + "", "must specify a cluster name")) + } + if r.Spec.Cluster.Name == r.Name { + result = append(result, + field.Invalid( + field.NewPath("metadata", "name"), + r.Name, "the pooler resource cannot have the same name as its cluster")) + } + return result +} + +// validate validates the configuration of a Pooler, returning +// a list of errors +func (v *PoolerCustomValidator) validate(r *apiv1.Pooler) (allErrs field.ErrorList) { + allErrs = append(allErrs, v.validatePgBouncer(r)...) + allErrs = append(allErrs, v.validateCluster(r)...) + return allErrs +} + +// validatePgbouncerGenericParameters validates pgbouncer parameters +func (v *PoolerCustomValidator) validatePgbouncerGenericParameters(r *apiv1.Pooler) field.ErrorList { + var result field.ErrorList + + for param := range r.Spec.PgBouncer.Parameters { + if !AllowedPgbouncerGenericConfigurationParameters.Has(param) { + result = append(result, + field.Invalid( + field.NewPath("spec", "pgbouncer", "parameters"), + param, "Invalid or reserved parameter")) + } + } + return result +} diff --git a/api/v1/pooler_webhook_test.go b/internal/webhook/v1/pooler_webhook_test.go similarity index 50% rename from api/v1/pooler_webhook_test.go rename to internal/webhook/v1/pooler_webhook_test.go index a1791248c9..49579d2474 100644 --- a/api/v1/pooler_webhook_test.go +++ b/internal/webhook/v1/pooler_webhook_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 @@ -19,113 +22,120 @@ package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + . "github.com/onsi/ginkgo/v2" .
"github.com/onsi/gomega" ) var _ = Describe("Pooler validation", func() { + var v *PoolerCustomValidator + BeforeEach(func() { + v = &PoolerCustomValidator{} + }) + It("doesn't allow specifying authQuerySecret without any authQuery", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ - AuthQuerySecret: &LocalObjectReference{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{ + AuthQuerySecret: &apiv1.LocalObjectReference{ Name: "test", }, }, }, } - Expect(pooler.validatePgBouncer()).NotTo(BeEmpty()) + Expect(v.validatePgBouncer(pooler)).NotTo(BeEmpty()) }) It("doesn't allow specifying authQuery without any authQuerySecret", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{ AuthQuery: "test", }, }, } - Expect(pooler.validatePgBouncer()).NotTo(BeEmpty()) + Expect(v.validatePgBouncer(pooler)).NotTo(BeEmpty()) }) It("allows having both authQuery and authQuerySecret", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{ AuthQuery: "test", - AuthQuerySecret: &LocalObjectReference{ + AuthQuerySecret: &apiv1.LocalObjectReference{ Name: "test", }, }, }, } - Expect(pooler.validatePgBouncer()).To(BeEmpty()) + Expect(v.validatePgBouncer(pooler)).To(BeEmpty()) }) It("allows the autoconfiguration mode", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{}, + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{}, }, } - Expect(pooler.validatePgBouncer()).To(BeEmpty()) + Expect(v.validatePgBouncer(pooler)).To(BeEmpty()) }) It("doesn't allow not specifying a cluster name", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - Cluster: LocalObjectReference{Name: ""}, + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + Cluster: apiv1.LocalObjectReference{Name: ""}, }, } - Expect(pooler.validateCluster()).NotTo(BeEmpty()) + Expect(v.validateCluster(pooler)).NotTo(BeEmpty()) }) It("doesn't allow to have a pooler with the same name of the cluster", func() { - pooler := Pooler{ + pooler := &apiv1.Pooler{ ObjectMeta: metav1.ObjectMeta{ Name: "test", }, - Spec: PoolerSpec{ - Cluster: LocalObjectReference{ + Spec: apiv1.PoolerSpec{ + Cluster: apiv1.LocalObjectReference{ Name: "test", }, }, } - Expect(pooler.validateCluster()).NotTo(BeEmpty()) + Expect(v.validateCluster(pooler)).NotTo(BeEmpty()) }) It("doesn't complain when specifying a cluster name", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - Cluster: LocalObjectReference{Name: "cluster-example"}, + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + Cluster: apiv1.LocalObjectReference{Name: "cluster-example"}, }, } - Expect(pooler.validateCluster()).To(BeEmpty()) + Expect(v.validateCluster(pooler)).To(BeEmpty()) }) It("does complain when given a fixed parameter", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{ Parameters: map[string]string{"pool_mode": "test"}, }, }, } - Expect(pooler.validatePgbouncerGenericParameters()).NotTo(BeEmpty()) + Expect(v.validatePgbouncerGenericParameters(pooler)).NotTo(BeEmpty()) }) It("does not complain when given a valid parameter", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + 
PgBouncer: &apiv1.PgBouncerSpec{ Parameters: map[string]string{"verbose": "10"}, }, }, } - Expect(pooler.validatePgbouncerGenericParameters()).To(BeEmpty()) + Expect(v.validatePgbouncerGenericParameters(pooler)).To(BeEmpty()) }) }) diff --git a/internal/webhook/v1/scheduledbackup_webhook.go b/internal/webhook/v1/scheduledbackup_webhook.go new file mode 100644 index 0000000000..a391af4ba0 --- /dev/null +++ b/internal/webhook/v1/scheduledbackup_webhook.go @@ -0,0 +1,193 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "context" + "fmt" + "strings" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/robfig/cron" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// scheduledBackupLog is for logging in this package. +var scheduledBackupLog = log.WithName("scheduledbackup-resource").WithValues("version", "v1") + +// SetupScheduledBackupWebhookWithManager registers the webhook for ScheduledBackup in the manager. +func SetupScheduledBackupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.ScheduledBackup{}). + WithValidator(&ScheduledBackupCustomValidator{}). + WithDefaulter(&ScheduledBackupCustomDefaulter{}). + Complete() +} + +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-scheduledbackup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,verbs=create;update,versions=v1,name=mscheduledbackup.cnpg.io,sideEffects=None + +// ScheduledBackupCustomDefaulter struct is responsible for setting default values on the custom resource of the +// Kind ScheduledBackup when those are created or updated. +type ScheduledBackupCustomDefaulter struct{} + +var _ webhook.CustomDefaulter = &ScheduledBackupCustomDefaulter{} + +// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind ScheduledBackup. +func (d *ScheduledBackupCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { + scheduledBackup, ok := obj.(*apiv1.ScheduledBackup) + if !ok { + return fmt.Errorf("expected a ScheduledBackup object but got %T", obj) + } + scheduledBackupLog.Info("Defaulting for ScheduledBackup", + "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace()) + + // TODO(user): fill in your defaulting logic. + + return nil +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. +// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-scheduledbackup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,versions=v1,name=vscheduledbackup.cnpg.io,sideEffects=None + +// ScheduledBackupCustomValidator struct is responsible for validating the ScheduledBackup resource +// when it is created, updated, or deleted. +type ScheduledBackupCustomValidator struct { + // TODO(user): Add more fields as needed for validation +} + +var _ webhook.CustomValidator = &ScheduledBackupCustomValidator{} + +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup. +func (v *ScheduledBackupCustomValidator) ValidateCreate( + _ context.Context, + obj runtime.Object, +) (admission.Warnings, error) { + scheduledBackup, ok := obj.(*apiv1.ScheduledBackup) + if !ok { + return nil, fmt.Errorf("expected a ScheduledBackup object but got %T", obj) + } + scheduledBackupLog.Info("Validation for ScheduledBackup upon creation", + "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace()) + + warnings, allErrs := v.validate(scheduledBackup) + if len(allErrs) == 0 { + return warnings, nil + } + + return warnings, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "ScheduledBackup"}, + scheduledBackup.Name, allErrs) +} + +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup. +func (v *ScheduledBackupCustomValidator) ValidateUpdate( + _ context.Context, + _, newObj runtime.Object, +) (admission.Warnings, error) { + scheduledBackup, ok := newObj.(*apiv1.ScheduledBackup) + if !ok { + return nil, fmt.Errorf("expected a ScheduledBackup object for the newObj but got %T", newObj) + } + scheduledBackupLog.Info("Validation for ScheduledBackup upon update", + "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace()) + + warnings, allErrs := v.validate(scheduledBackup) + if len(allErrs) == 0 { + return warnings, nil + } + + return warnings, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "ScheduledBackup"}, + scheduledBackup.Name, allErrs) +} + +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup. +func (v *ScheduledBackupCustomValidator) ValidateDelete( + _ context.Context, + obj runtime.Object, +) (admission.Warnings, error) { + scheduledBackup, ok := obj.(*apiv1.ScheduledBackup) + if !ok { + return nil, fmt.Errorf("expected a ScheduledBackup object but got %T", obj) + } + scheduledBackupLog.Info("Validation for ScheduledBackup upon deletion", + "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace()) + + // TODO(user): fill in your validation logic upon object deletion.
+ + return nil, nil +} + +func (v *ScheduledBackupCustomValidator) validate(r *apiv1.ScheduledBackup) (admission.Warnings, field.ErrorList) { + var result field.ErrorList + var warnings admission.Warnings + + if _, err := cron.Parse(r.GetSchedule()); err != nil { + result = append(result, + field.Invalid( + field.NewPath("spec", "schedule"), + r.Spec.Schedule, err.Error())) + } else if len(strings.Fields(r.Spec.Schedule)) != 6 { + warnings = append( + warnings, + "Schedule parameter may not have the right number of arguments "+ + "(usually six arguments are needed)", + ) + } + + if r.Spec.Method == apiv1.BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { + result = append(result, field.Invalid( + field.NewPath("spec", "method"), + r.Spec.Method, + "Cannot use volumeSnapshot backup method due to missing "+ + "VolumeSnapshot CRD. If you installed the CRD after having "+ + "started the operator, please restart it to enable "+ + "VolumeSnapshot support", + )) + } + + if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.Online != nil { + result = append(result, field.Invalid( + field.NewPath("spec", "online"), + r.Spec.Online, + "Online parameter can be specified only if the method is volumeSnapshot", + )) + } + + if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil { + result = append(result, field.Invalid( + field.NewPath("spec", "onlineConfiguration"), + r.Spec.OnlineConfiguration, + "OnlineConfiguration parameter can be specified only if the method is volumeSnapshot", + )) + } + + return warnings, result +} diff --git a/internal/webhook/v1/scheduledbackup_webhook_test.go b/internal/webhook/v1/scheduledbackup_webhook_test.go new file mode 100644 index 0000000000..80dc6a86ed --- /dev/null +++ b/internal/webhook/v1/scheduledbackup_webhook_test.go @@ -0,0 +1,129 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "k8s.io/utils/ptr" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Validate schedule", func() { + var v *ScheduledBackupCustomValidator + BeforeEach(func() { + v = &ScheduledBackupCustomValidator{} + }) + + It("doesn't complain if there's a schedule", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "0 0 0 * * *", + }, + } + + warnings, result := v.validate(schedule) + Expect(warnings).To(BeEmpty()) + Expect(result).To(BeEmpty()) + }) + + It("warn the user if the schedule has a wrong number of arguments", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "1 2 3 4 5", + }, + } + + warnings, result := v.validate(schedule) + Expect(warnings).To(HaveLen(1)) + Expect(result).To(BeEmpty()) + }) + + It("complain with a wrong time", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "0 0 0 * * * 1996", + }, + } + + warnings, result := v.validate(schedule) + Expect(warnings).To(BeEmpty()) + Expect(result).To(HaveLen(1)) + }) + + It("doesn't complain if VolumeSnapshot CRD is present", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "0 0 0 * * *", + Method: apiv1.BackupMethodVolumeSnapshot, + }, + } + utils.SetVolumeSnapshot(true) + + warnings, result := v.validate(schedule) + Expect(warnings).To(BeEmpty()) + Expect(result).To(BeEmpty()) + }) + + It("complains if VolumeSnapshot CRD is not present", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "0 0 0 * * *", + Method: apiv1.BackupMethodVolumeSnapshot, + }, + } + utils.SetVolumeSnapshot(false) + warnings, result := v.validate(schedule) + Expect(warnings).To(BeEmpty()) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.method")) + }) + + It("complains if online is set on a barman backup", func() { + scheduledBackup := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Method: apiv1.BackupMethodBarmanObjectStore, + Online: ptr.To(true), + Schedule: "* * * * * *", + }, + } + warnings, result := v.validate(scheduledBackup) + Expect(warnings).To(BeEmpty()) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.online")) + }) + + It("complains if onlineConfiguration is set on a barman backup", func() { + scheduledBackup := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Method: apiv1.BackupMethodBarmanObjectStore, + OnlineConfiguration: &apiv1.OnlineConfiguration{}, + Schedule: "* * * * * *", + }, + } + warnings, result := v.validate(scheduledBackup) + Expect(warnings).To(BeEmpty()) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) + }) +}) diff --git a/pkg/management/postgres/webserver/suite_test.go b/internal/webhook/v1/suite_test.go similarity index 75% rename from pkg/management/postgres/webserver/suite_test.go rename to internal/webhook/v1/suite_test.go index 34419aa9c8..1fb13ba7db 100644 --- a/pkg/management/postgres/webserver/suite_test.go +++ b/internal/webhook/v1/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package webserver +package v1 import ( "testing" @@ -23,7 +26,8 @@ import ( . "github.com/onsi/gomega" ) -func TestMetricsServer(t *testing.T) { +func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Postgres Webserver test suite") + + RunSpecs(t, "Webhook Suite") } diff --git a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE b/licenses/go-licenses/github.com/blang/semver/v4/LICENSE similarity index 92% rename from licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE rename to licenses/go-licenses/github.com/blang/semver/v4/LICENSE index 31f292dce5..5ba5c86fcb 100644 --- a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE +++ b/licenses/go-licenses/github.com/blang/semver/v4/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +The MIT License -Copyright (c) 2018 QRI, Inc. +Copyright (c) 2014 Benedikt Lang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/licenses/go-licenses/github.com/google/gofuzz/LICENSE b/licenses/go-licenses/github.com/cloudnative-pg/barman-cloud/pkg/LICENSE similarity index 99% rename from licenses/go-licenses/github.com/google/gofuzz/LICENSE rename to licenses/go-licenses/github.com/cloudnative-pg/barman-cloud/pkg/LICENSE index d645695673..261eeb9e9f 100644 --- a/licenses/go-licenses/github.com/google/gofuzz/LICENSE +++ b/licenses/go-licenses/github.com/cloudnative-pg/barman-cloud/pkg/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/licenses/go-licenses/github.com/klauspost/compress/LICENSE b/licenses/go-licenses/github.com/cloudnative-pg/machinery/pkg/LICENSE similarity index 67% rename from licenses/go-licenses/github.com/klauspost/compress/LICENSE rename to licenses/go-licenses/github.com/cloudnative-pg/machinery/pkg/LICENSE index 87d5574777..261eeb9e9f 100644 --- a/licenses/go-licenses/github.com/klauspost/compress/LICENSE +++ b/licenses/go-licenses/github.com/cloudnative-pg/machinery/pkg/LICENSE @@ -1,36 +1,3 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -219,7 +186,7 @@ Files: gzhttp/* same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016-2017 The New York Times Company + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -232,73 +199,3 @@ Files: gzhttp/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/go-licenses/github.com/fatih/color/LICENSE.md b/licenses/go-licenses/github.com/fatih/color/LICENSE.md new file mode 100644 index 0000000000..25fdaf639d --- /dev/null +++ b/licenses/go-licenses/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/licenses/go-licenses/github.com/fxamacker/cbor/v2/LICENSE b/licenses/go-licenses/github.com/fxamacker/cbor/v2/LICENSE new file mode 100644 index 0000000000..eaa8504921 --- /dev/null +++ b/licenses/go-licenses/github.com/fxamacker/cbor/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/licenses/go-licenses/github.com/golang/groupcache/lru/LICENSE b/licenses/go-licenses/github.com/golang/groupcache/lru/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/licenses/go-licenses/github.com/golang/groupcache/lru/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). 
- -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/go-licenses/github.com/golang/protobuf/LICENSE b/licenses/go-licenses/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a4..0000000000 --- a/licenses/go-licenses/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/licenses/go-licenses/github.com/imdario/mergo/LICENSE b/licenses/go-licenses/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298d..0000000000 --- a/licenses/go-licenses/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/go-licenses/github.com/klauspost/compress/internal/snapref/LICENSE b/licenses/go-licenses/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10f4c..0000000000 --- a/licenses/go-licenses/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/go-licenses/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/licenses/go-licenses/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/licenses/go-licenses/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/go-licenses/github.com/mattn/go-colorable/LICENSE b/licenses/go-licenses/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 0000000000..91b5cef30e --- /dev/null +++ b/licenses/go-licenses/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/go-licenses/github.com/mattn/go-isatty/LICENSE b/licenses/go-licenses/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 0000000000..65dc692b6b --- /dev/null +++ b/licenses/go-licenses/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/go-licenses/github.com/pmezard/go-difflib/difflib/LICENSE b/licenses/go-licenses/github.com/pmezard/go-difflib/difflib/LICENSE new file mode 100644 index 0000000000..c67dad612a --- /dev/null +++ b/licenses/go-licenses/github.com/pmezard/go-difflib/difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/licenses/go-licenses/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE b/licenses/go-licenses/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE index e06d208186..74e6ec6963 100644 --- a/licenses/go-licenses/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE +++ b/licenses/go-licenses/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE @@ -176,7 +176,7 @@ Apache License END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. - + To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include @@ -199,4 +199,3 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/licenses/go-licenses/gopkg.in/yaml.v2/LICENSE b/licenses/go-licenses/github.com/stern/stern/stern/LICENSE similarity index 100% rename from licenses/go-licenses/gopkg.in/yaml.v2/LICENSE rename to licenses/go-licenses/github.com/stern/stern/stern/LICENSE diff --git a/licenses/go-licenses/github.com/x448/float16/LICENSE b/licenses/go-licenses/github.com/x448/float16/LICENSE new file mode 100644 index 0000000000..bf6e357854 --- /dev/null +++ b/licenses/go-licenses/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/licenses/go-licenses/go.starlark.net/LICENSE b/licenses/go-licenses/go.starlark.net/LICENSE deleted file mode 100644 index a6609a1437..0000000000 --- a/licenses/go-licenses/go.starlark.net/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright (c) 2017 The Bazel Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. - -3. 
Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/go-licenses/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/licenses/go-licenses/go.yaml.in/yaml/v2/LICENSE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/structured-merge-diff/v4/LICENSE rename to licenses/go-licenses/go.yaml.in/yaml/v2/LICENSE diff --git a/licenses/go-licenses/gopkg.in/yaml.v2/NOTICE b/licenses/go-licenses/go.yaml.in/yaml/v2/NOTICE similarity index 100% rename from licenses/go-licenses/gopkg.in/yaml.v2/NOTICE rename to licenses/go-licenses/go.yaml.in/yaml/v2/NOTICE diff --git a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE b/licenses/go-licenses/go.yaml.in/yaml/v3/LICENSE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE rename to licenses/go-licenses/go.yaml.in/yaml/v3/LICENSE diff --git a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE b/licenses/go-licenses/go.yaml.in/yaml/v3/NOTICE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE rename to licenses/go-licenses/go.yaml.in/yaml/v3/NOTICE diff --git a/licenses/go-licenses/golang.org/x/exp/LICENSE b/licenses/go-licenses/golang.org/x/exp/LICENSE deleted file mode 100644 index 2a7cf70da6..0000000000 --- a/licenses/go-licenses/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/go-licenses/golang.org/x/oauth2/LICENSE b/licenses/go-licenses/golang.org/x/oauth2/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/licenses/go-licenses/golang.org/x/oauth2/LICENSE +++ b/licenses/go-licenses/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/licenses/go-licenses/golang.org/x/time/rate/LICENSE b/licenses/go-licenses/golang.org/x/time/rate/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/licenses/go-licenses/golang.org/x/time/rate/LICENSE +++ b/licenses/go-licenses/golang.org/x/time/rate/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/licenses/go-licenses/github.com/evanphx/json-patch/LICENSE b/licenses/go-licenses/gopkg.in/evanphx/json-patch.v4/LICENSE similarity index 100% rename from licenses/go-licenses/github.com/evanphx/json-patch/LICENSE rename to licenses/go-licenses/gopkg.in/evanphx/json-patch.v4/LICENSE diff --git a/licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/net/LICENSE b/licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/LICENSE similarity index 100% rename from licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/net/LICENSE rename to licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/LICENSE diff --git a/licenses/go-licenses/sigs.k8s.io/randfill/LICENSE b/licenses/go-licenses/sigs.k8s.io/randfill/LICENSE new file mode 100644 index 0000000000..9dd29274c3 --- /dev/null +++ b/licenses/go-licenses/sigs.k8s.io/randfill/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 The gofuzz Authors + Copyright 2025 The Kubernetes Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/licenses/go-licenses/sigs.k8s.io/randfill/NOTICE b/licenses/go-licenses/sigs.k8s.io/randfill/NOTICE new file mode 100644 index 0000000000..6984e71f65 --- /dev/null +++ b/licenses/go-licenses/sigs.k8s.io/randfill/NOTICE @@ -0,0 +1,24 @@ +When donating the randfill project to the CNCF, we could not reach all the +gofuzz contributors to sign the CNCF CLA. As such, according to the CNCF rules +to donate a repository, we must add a NOTICE referencing section 7 of the CLA +with a list of developers who could not be reached. + +`7. Should You wish to submit work that is not Your original creation, You may +submit it to the Foundation separately from any Contribution, identifying the +complete details of its source and of any license or other restriction +(including, but not limited to, related patents, trademarks, and license +agreements) of which you are personally aware, and conspicuously marking the +work as "Submitted on behalf of a third-party: [named here]".` + +Submitted on behalf of a third-party: @dnephin (Daniel Nephin) +Submitted on behalf of a third-party: @AlekSi (Alexey Palazhchenko) +Submitted on behalf of a third-party: @bbigras (Bruno Bigras) +Submitted on behalf of a third-party: @samirkut (Samir) +Submitted on behalf of a third-party: @posener (Eyal Posener) +Submitted on behalf of a third-party: @Ashikpaul (Ashik Paul) +Submitted on behalf of a third-party: @kwongtailau (Kwongtai) +Submitted on behalf of a third-party: @ericcornelissen (Eric Cornelissen) +Submitted on behalf of a third-party: @eclipseo (Robert-André Mauchin) +Submitted on behalf of a third-party: @yanzhoupan (Andrew Pan) +Submitted on behalf of a third-party: @STRRL (Zhiqiang ZHOU) +Submitted on behalf of a third-party: @disconnect3d (Disconnect3d) diff --git a/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/licenses/go-licenses/sigs.k8s.io/structured-merge-diff/v6/LICENSE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/LICENSE rename to licenses/go-licenses/sigs.k8s.io/structured-merge-diff/v6/LICENSE diff --git a/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/NOTICE deleted file mode 100644 index 866d74a7ad..0000000000 --- a/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/pkg/certs/certs.go b/pkg/certs/certs.go index 6efd147c01..7c177bfae3 100644 --- a/pkg/certs/certs.go +++ b/pkg/certs/certs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package certs handle the PKI infrastructure of the operator @@ -191,11 +194,11 @@ func (pair KeyPair) createAndSignPairWithValidity( } leafTemplate.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement - switch { - case usage == CertTypeClient: + switch usage { + case CertTypeClient: leafTemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} - case usage == CertTypeServer: + case CertTypeServer: leafTemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} leafTemplate.KeyUsage |= x509.KeyUsageKeyEncipherment diff --git a/pkg/certs/certs_test.go b/pkg/certs/certs_test.go index aa85c9e0da..f1b44ae4e1 100644 --- a/pkg/certs/certs_test.go +++ b/pkg/certs/certs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs @@ -343,20 +346,22 @@ var _ = Describe("Certicate duration and expiration threshold", func() { defaultExpiringThreshold := configuration.ExpiringCheckThreshold * 24 * time.Hour tenDays := 10 * 24 * time.Hour + BeforeEach(func() { + configuration.Current = configuration.NewConfiguration() + }) + It("returns the default duration", func() { duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(defaultCertificateDuration)) }) It("returns the default duration if the configuration is a negative value", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.CertificateDuration = -1 duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(defaultCertificateDuration)) }) It("returns a valid duration of 10 days", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.CertificateDuration = 10 duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(tenDays)) @@ -368,14 +373,12 @@ var _ = Describe("Certicate duration and expiration threshold", func() { }) It("returns the default check threshold if the configuration is a negative value", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.ExpiringCheckThreshold = -1 threshold := getCheckThreshold() Expect(threshold).To(BeEquivalentTo(defaultExpiringThreshold)) }) It("returns a valid threshold of 10 days", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.ExpiringCheckThreshold = 10 threshold := getCheckThreshold() Expect(threshold).To(BeEquivalentTo(tenDays)) diff --git a/pkg/certs/k8s.go b/pkg/certs/k8s.go index d12e3b8d03..a974927718 100644 --- a/pkg/certs/k8s.go +++ b/pkg/certs/k8s.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
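Note on the `createAndSignPairWithValidity` hunk above: the move from a tagless `switch { case usage == ... }` to `switch usage { case ... }` is purely idiomatic and does not change behavior. A minimal, self-contained sketch of the resulting key-usage mapping (the `CertType` type and constants are simplified stand-ins for the operator's own):

```go
package main

import (
	"crypto/x509"
	"fmt"
)

// CertType is a simplified stand-in for the operator's certificate type.
type CertType string

const (
	CertTypeClient CertType = "client"
	CertTypeServer CertType = "server"
)

// leafUsages mirrors the switch in the hunk: every leaf certificate gets
// digital signature and key agreement; client leaves add the ClientAuth
// extended usage, server leaves add ServerAuth plus key encipherment.
func leafUsages(usage CertType) (x509.KeyUsage, []x509.ExtKeyUsage) {
	keyUsage := x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement

	switch usage {
	case CertTypeClient:
		return keyUsage, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
	case CertTypeServer:
		keyUsage |= x509.KeyUsageKeyEncipherment
		return keyUsage, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
	}
	return keyUsage, nil
}

func main() {
	ku, eku := leafUsages(CertTypeServer)
	fmt.Printf("server key usage: %d, extended usages: %v\n", ku, eku)
}
```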
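The `certs_test.go` hunks in the same range consolidate the `configuration.Current = configuration.NewConfiguration()` resets into a single `BeforeEach`, so every spec starts from a fresh global configuration instead of only the specs that previously reset it inline. A reduced sketch of that Ginkgo pattern, with a hypothetical `Config` type standing in for the operator's configuration package:

```go
package config_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Config is a stand-in for a mutable, package-level configuration.
type Config struct{ CertificateDuration int }

var current *Config

func TestConfig(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Config Suite")
}

var _ = Describe("certificate duration", func() {
	// Resetting shared global state before every spec keeps the specs
	// order-independent; no single It has to remember to do it inline.
	BeforeEach(func() {
		current = &Config{CertificateDuration: 90}
	})

	It("sees the default value", func() {
		Expect(current.CertificateDuration).To(Equal(90))
	})

	It("can override the value without leaking into other specs", func() {
		current.CertificateDuration = 10
		Expect(current.CertificateDuration).To(Equal(10))
	})
})
```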
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/k8s_test.go b/pkg/certs/k8s_test.go index 54f54c9044..effa53106e 100644 --- a/pkg/certs/k8s_test.go +++ b/pkg/certs/k8s_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs @@ -163,7 +166,7 @@ var _ = Describe("Root CA secret generation", func() { }) var _ = Describe("Webhook certificate validation", func() { - When("we have a valid CA secret", func() { + When("we have a valid CA secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate @@ -196,7 +199,7 @@ var _ = Describe("Webhook certificate validation", func() { }) }) - When("we have a valid CA and webhook secret", func() { + When("we have a valid CA and webhook secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate var caSecret, webhookSecret *corev1.Secret @@ -220,7 +223,7 @@ var _ = Describe("Webhook certificate validation", func() { }) }) - When("we have a valid CA secret and expired webhook secret", func() { + When("we have a valid CA secret and expired webhook secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate diff --git a/pkg/certs/operator_deployment.go b/pkg/certs/operator_deployment.go index 92c897e21f..0a294a7d63 100644 --- a/pkg/certs/operator_deployment.go +++ b/pkg/certs/operator_deployment.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/operator_deployment_test.go b/pkg/certs/operator_deployment_test.go index 96e9adc72b..6ba58ad9ec 100644 --- a/pkg/certs/operator_deployment_test.go +++ b/pkg/certs/operator_deployment_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/suite_test.go b/pkg/certs/suite_test.go index f3c6358576..fa2ded2e04 100644 --- a/pkg/certs/suite_test.go +++ b/pkg/certs/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/tls.go b/pkg/certs/tls.go index 41e72396aa..68b41ebcf3 100644 --- a/pkg/certs/tls.go +++ b/pkg/certs/tls.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs @@ -54,9 +57,18 @@ func newTLSConfigFromSecret( // for the -rw service, which would cause a name verification error. caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCertificate) + + return NewTLSConfigFromCertPool(caCertPool), nil +} + +// NewTLSConfigFromCertPool creates a tls.Config object from X509 cert pool +// containing the expected server CA +func NewTLSConfigFromCertPool( + certPool *x509.CertPool, +) *tls.Config { tlsConfig := tls.Config{ MinVersion: tls.VersionTLS13, - RootCAs: caCertPool, + RootCAs: certPool, InsecureSkipVerify: true, //#nosec G402 -- we are verifying the certificate ourselves VerifyPeerCertificate: func(rawCerts [][]byte, _ [][]*x509.Certificate) error { // Code adapted from https://go.dev/src/crypto/tls/handshake_client.go#L986 @@ -74,7 +86,7 @@ func newTLSConfigFromSecret( } opts := x509.VerifyOptions{ - Roots: caCertPool, + Roots: certPool, Intermediates: x509.NewCertPool(), } @@ -90,7 +102,7 @@ func newTLSConfigFromSecret( }, } - return &tlsConfig, nil + return &tlsConfig } // NewTLSConfigForContext creates a tls.config with the provided data and returns an expanded context that contains diff --git a/pkg/certs/tls_test.go b/pkg/certs/tls_test.go index 8e99876520..6c62242280 100644 --- a/pkg/certs/tls_test.go +++ b/pkg/certs/tls_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
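Extracting NewTLSConfigFromCertPool above means a caller that already holds a CA bundle in memory no longer needs a Kubernetes client and secret lookup to obtain the verifying tls.Config. A hedged sketch of such a caller (the address and PEM bundle are placeholders):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
)

// dialWithCA builds the TLS config straight from an in-memory CA bundle.
// The returned config (per the diff above) verifies the peer chain against
// the pool itself while deliberately skipping hostname verification.
func dialWithCA(addr string, caBundlePEM []byte) (*tls.Conn, error) {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caBundlePEM) {
		return nil, fmt.Errorf("no valid CA certificates in bundle")
	}
	return tls.Dial("tcp", addr, certs.NewTLSConfigFromCertPool(pool))
}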
+ +SPDX-License-Identifier: Apache-2.0 */ package certs @@ -35,13 +38,11 @@ import ( var _ = Describe("newTLSConfigFromSecret", func() { var ( - ctx context.Context c client.Client caSecret types.NamespacedName ) BeforeEach(func() { - ctx = context.TODO() caSecret = types.NamespacedName{Name: "test-secret", Namespace: "default"} }) @@ -276,7 +277,7 @@ MQCKGqId+Xj6O6gnoi9xhu0rbzSnMjrURoa1v2d5+O5XssE7LGtJdIKrd2p7EuwE c = fake.NewClientBuilder().Build() }) - It("should return an error", func() { + It("should return an error", func(ctx SpecContext) { tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret) Expect(err).To(HaveOccurred()) Expect(tlsConfig).To(BeNil()) @@ -295,7 +296,7 @@ MQCKGqId+Xj6O6gnoi9xhu0rbzSnMjrURoa1v2d5+O5XssE7LGtJdIKrd2p7EuwE c = fake.NewClientBuilder().WithObjects(secret).Build() }) - It("should return an error", func() { + It("should return an error", func(ctx SpecContext) { tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret) Expect(err).To(HaveOccurred()) Expect(tlsConfig).To(BeNil()) diff --git a/pkg/concurrency/doc.go b/pkg/concurrency/doc.go index 7565222dab..0394bf9285 100644 --- a/pkg/concurrency/doc.go +++ b/pkg/concurrency/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package concurrency contains utilities for goroutines coordination diff --git a/pkg/concurrency/executed.go b/pkg/concurrency/executed.go index d8375986e5..6a0a11291c 100644 --- a/pkg/concurrency/executed.go +++ b/pkg/concurrency/executed.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,16 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package concurrency import ( "sync" + + "go.uber.org/multierr" ) // Executed can be used to wait for something to be executed, @@ -27,6 +32,7 @@ import ( type Executed struct { cond sync.Cond done bool + err error } // MultipleExecuted can be used to wrap multiple Executed conditions that @@ -40,6 +46,16 @@ func (m MultipleExecuted) Wait() { } } +// Err returns a composition of the errors raised by the individual +// execution components or nil if there is no error. 
+func (m MultipleExecuted) Err() error { + var err error + for _, cond := range m { + err = multierr.Append(err, cond.Err()) + } + return err +} + // NewExecuted creates a new Executed func NewExecuted() *Executed { return &Executed{ @@ -59,10 +75,26 @@ func (i *Executed) Wait() { // Broadcast broadcasts execution to waiting goroutines func (i *Executed) Broadcast() { + i.BroadcastError(nil) +} + +// BroadcastError broadcasts execution to waiting goroutines +// recording the passed error status +func (i *Executed) BroadcastError(err error) { i.cond.L.Lock() defer i.cond.L.Unlock() if !i.done { + i.err = err i.done = true i.cond.Broadcast() } } + +// Err returns the error passed to BroadcastError if it was +// executed or nil. +func (i *Executed) Err() error { + if !i.done { + return nil + } + return i.err +} diff --git a/pkg/concurrency/executed_test.go b/pkg/concurrency/executed_test.go index ae7dff79e4..0e6054ca2c 100644 --- a/pkg/concurrency/executed_test.go +++ b/pkg/concurrency/executed_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package concurrency diff --git a/pkg/concurrency/suite_test.go b/pkg/concurrency/suite_test.go index e75a189d0d..fb6f57c6e5 100644 --- a/pkg/concurrency/suite_test.go +++ b/pkg/concurrency/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package concurrency diff --git a/pkg/conditions/conditions.go b/pkg/conditions/conditions.go deleted file mode 100644 index 80b6d5af7d..0000000000 --- a/pkg/conditions/conditions.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conditions - -import ( - "context" - "reflect" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// Patch will patch a particular condition in cluster status. 
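BroadcastError and the two Err accessors extend the Executed primitive so a component can report a startup failure to whoever is waiting on it, not just unblock them, with multierr composing the per-component errors. A usage sketch, assuming MultipleExecuted is a slice of *Executed as its range-based Wait and Err imply:

package main

import (
	"errors"
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/concurrency"
)

func main() {
	webServerReady := concurrency.NewExecuted()
	metricsReady := concurrency.NewExecuted()
	ready := concurrency.MultipleExecuted{webServerReady, metricsReady}

	// A failing component records its error instead of silently unblocking.
	go webServerReady.BroadcastError(errors.New("bind: address already in use"))
	// Broadcast is now equivalent to BroadcastError(nil).
	go metricsReady.Broadcast()

	ready.Wait()             // returns once every component has broadcast
	fmt.Println(ready.Err()) // multierr composition; nil only if all succeeded
}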
-func Patch( - ctx context.Context, - c client.Client, - cluster *apiv1.Cluster, - condition *metav1.Condition, -) error { - if cluster == nil || condition == nil { - return nil - } - existingCluster := cluster.DeepCopy() - meta.SetStatusCondition(&cluster.Status.Conditions, *condition) - - if !reflect.DeepEqual(existingCluster.Status.Conditions, cluster.Status.Conditions) { - // To avoid conflict using patch instead of update - if err := c.Status().Patch(ctx, cluster, client.MergeFrom(existingCluster)); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/configfile/configfile.go b/pkg/configfile/configfile.go index 14ac64bcc1..28adccbd30 100644 --- a/pkg/configfile/configfile.go +++ b/pkg/configfile/configfile.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package configfile contains primitives needed to manage a configuration file @@ -23,9 +26,8 @@ import ( "strings" "github.com/cloudnative-pg/machinery/pkg/fileutils" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/lib/pq" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) // UpdatePostgresConfigurationFile search and replace options in a Postgres configuration file. diff --git a/pkg/configfile/configfile_test.go b/pkg/configfile/configfile_test.go index 3d2854ff05..6a7706b7c7 100644 --- a/pkg/configfile/configfile_test.go +++ b/pkg/configfile/configfile_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configfile @@ -20,10 +23,10 @@ import ( "os" "path/filepath" + "github.com/cloudnative-pg/machinery/pkg/fileutils" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - - "github.com/cloudnative-pg/machinery/pkg/fileutils" ) var _ = Describe("update Postgres configuration files", func() { diff --git a/pkg/configfile/connection_string.go b/pkg/configfile/connection_string.go index f2610cf9e1..8ac53e5e47 100644 --- a/pkg/configfile/connection_string.go +++ b/pkg/configfile/connection_string.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
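pkg/conditions disappears entirely with this change. For reference, the pattern its Patch helper implemented (visible in the deleted lines above) is the usual set-then-merge-patch dance for status conditions; a self-contained sketch, with an illustrative package name:

package example

import (
	"context"
	"reflect"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// patchCondition sets a condition on the cluster status and merge-patches
// it only when the conditions actually changed, mirroring the deleted
// conditions.Patch: patching (rather than updating) avoids conflicts.
func patchCondition(
	ctx context.Context,
	c client.Client,
	cluster *apiv1.Cluster,
	condition metav1.Condition,
) error {
	before := cluster.DeepCopy()
	meta.SetStatusCondition(&cluster.Status.Conditions, condition)
	if reflect.DeepEqual(before.Status.Conditions, cluster.Status.Conditions) {
		return nil // nothing to persist
	}
	return c.Status().Patch(ctx, cluster, client.MergeFrom(before))
}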
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configfile diff --git a/pkg/configfile/connection_string_test.go b/pkg/configfile/connection_string_test.go index ef80b61c16..510e1f0367 100644 --- a/pkg/configfile/connection_string_test.go +++ b/pkg/configfile/connection_string_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configfile diff --git a/pkg/configfile/suite_test.go b/pkg/configfile/suite_test.go index 76d4a19203..47aeb5a7d8 100644 --- a/pkg/configfile/suite_test.go +++ b/pkg/configfile/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configfile diff --git a/pkg/configparser/configparser.go b/pkg/configparser/configparser.go index 40c2a8ed14..af87d657dd 100644 --- a/pkg/configparser/configparser.go +++ b/pkg/configparser/configparser.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ /* @@ -49,6 +52,7 @@ package configparser import ( "fmt" + "os" "reflect" "strconv" "strings" @@ -60,7 +64,7 @@ var configparserLog = log.WithName("configparser") // ReadConfigMap reads the configuration from the environment and the passed in data map. 
// Config and defaults are supposed to be pointers to structs of the same type -func ReadConfigMap(target interface{}, defaults interface{}, data map[string]string, env EnvironmentSource) { +func ReadConfigMap(target interface{}, defaults interface{}, data map[string]string) { ensurePointerToCompatibleStruct("target", target, "default", defaults) count := reflect.TypeOf(defaults).Elem().NumField() @@ -98,7 +102,7 @@ func ReadConfigMap(target interface{}, defaults interface{}, data map[string]str value = valueField.String() } // If the key is present in the environment, use its value - if envValue := env.Getenv(envName); envValue != "" { + if envValue := os.Getenv(envName); envValue != "" { value = envValue } // If the key is present in the passed data, use its value diff --git a/pkg/configparser/configparser_test.go b/pkg/configparser/configparser_test.go index a58b8852c8..709d7b3b81 100644 --- a/pkg/configparser/configparser_test.go +++ b/pkg/configparser/configparser_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configparser @@ -52,8 +55,8 @@ var defaultInheritedAnnotations = []string{ const oneNamespace = "one-namespace" // readConfigMap reads the configuration from the environment and the passed in data map -func (config *FakeData) readConfigMap(data map[string]string, env EnvironmentSource) { - ReadConfigMap(config, &FakeData{InheritedAnnotations: defaultInheritedAnnotations}, data, env) +func (config *FakeData) readConfigMap(data map[string]string) { + ReadConfigMap(config, &FakeData{InheritedAnnotations: defaultInheritedAnnotations}, data) } var _ = Describe("Data test suite", func() { @@ -64,11 +67,14 @@ var _ = Describe("Data test suite", func() { It("loads values from a map", func() { config := &FakeData{} + GinkgoT().Setenv("WATCH_NAMESPACE", "") + GinkgoT().Setenv("INHERITED_ANNOTATIONS", "") + GinkgoT().Setenv("INHERITED_LABELS", "") config.readConfigMap(map[string]string{ "WATCH_NAMESPACE": oneNamespace, "INHERITED_ANNOTATIONS": "one, two", "INHERITED_LABELS": "alpha, beta", - }, NewFakeEnvironment(nil)) + }) Expect(config.WatchNamespace).To(Equal(oneNamespace)) Expect(config.InheritedAnnotations).To(Equal([]string{"one", "two"})) Expect(config.InheritedLabels).To(Equal([]string{"alpha", "beta"})) @@ -76,13 +82,11 @@ var _ = Describe("Data test suite", func() { It("loads values from environment", func() { config := &FakeData{} - fakeEnv := NewFakeEnvironment(map[string]string{ - "WATCH_NAMESPACE": oneNamespace, - "INHERITED_ANNOTATIONS": "one, two", - "INHERITED_LABELS": "alpha, beta", - "EXPIRING_CHECK_THRESHOLD": "2", - }) - config.readConfigMap(nil, fakeEnv) + GinkgoT().Setenv("WATCH_NAMESPACE", oneNamespace) + GinkgoT().Setenv("INHERITED_ANNOTATIONS", "one, two") + GinkgoT().Setenv("INHERITED_LABELS", "alpha, beta") + GinkgoT().Setenv("EXPIRING_CHECK_THRESHOLD", "2") + config.readConfigMap(nil) Expect(config.WatchNamespace).To(Equal(oneNamespace)) Expect(config.InheritedAnnotations).To(Equal([]string{"one", "two"})) 
Expect(config.InheritedLabels).To(Equal([]string{"alpha", "beta"})) @@ -94,43 +98,23 @@ var _ = Describe("Data test suite", func() { CertificateDuration: 90, ExpiringCheckThreshold: 7, } - fakeEnv := NewFakeEnvironment(map[string]string{ - "EXPIRING_CHECK_THRESHOLD": "3600min", - "CERTIFICATE_DURATION": "unknown", - }) + GinkgoT().Setenv("EXPIRING_CHECK_THRESHOLD", "3600min") + GinkgoT().Setenv("CERTIFICATE_DURATION", "unknown") defaultData := &FakeData{ CertificateDuration: 90, ExpiringCheckThreshold: 7, } - ReadConfigMap(config, defaultData, nil, fakeEnv) + ReadConfigMap(config, defaultData, nil) Expect(config.ExpiringCheckThreshold).To(Equal(7)) Expect(config.CertificateDuration).To(Equal(90)) }) It("handles correctly default values of slices", func() { + GinkgoT().Setenv("INHERITED_ANNOTATIONS", "") + GinkgoT().Setenv("INHERITED_LABELS", "") config := &FakeData{} - config.readConfigMap(nil, NewFakeEnvironment(nil)) + config.readConfigMap(nil) Expect(config.InheritedAnnotations).To(Equal(defaultInheritedAnnotations)) Expect(config.InheritedLabels).To(BeNil()) }) }) - -// FakeEnvironment is an EnvironmentSource that fetches data from an internal map -type FakeEnvironment struct { - values map[string]string -} - -// NewFakeEnvironment creates a FakeEnvironment with the specified data inside -func NewFakeEnvironment(data map[string]string) FakeEnvironment { - f := FakeEnvironment{} - if data == nil { - data = make(map[string]string) - } - f.values = data - return f -} - -// Getenv retrieves the value of the environment variable named by the key -func (f FakeEnvironment) Getenv(key string) string { - return f.values[key] -} diff --git a/pkg/configparser/suite_test.go b/pkg/configparser/suite_test.go index 89994b0c53..c058301bbd 100644 --- a/pkg/configparser/suite_test.go +++ b/pkg/configparser/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configparser diff --git a/pkg/executablehash/executablehash.go b/pkg/executablehash/executablehash.go index 163b0f5c24..836adb0e65 100644 --- a/pkg/executablehash/executablehash.go +++ b/pkg/executablehash/executablehash.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
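With the EnvironmentSource parameter gone, ReadConfigMap reads the real process environment through os.Getenv, which is why the tests above switch to GinkgoT().Setenv (automatically restored after each spec) and explicitly blank out variables that could leak in from the developer's shell. The per-field lookup order the shown code implements, in an isolated sketch: compiled-in default, then environment variable, then the explicit data map.

package main

import (
	"fmt"
	"os"
)

// resolve mirrors ReadConfigMap's per-field precedence: start from the
// default, let a non-empty environment variable override it, and let an
// entry in the data map override both.
func resolve(envName, defaultValue string, data map[string]string) string {
	value := defaultValue
	if envValue := os.Getenv(envName); envValue != "" {
		value = envValue
	}
	if mapValue, ok := data[envName]; ok {
		value = mapValue
	}
	return value
}

func main() {
	_ = os.Setenv("WATCH_NAMESPACE", "from-env")
	fmt.Println(resolve("WATCH_NAMESPACE", "default", nil))
	// from-env
	fmt.Println(resolve("WATCH_NAMESPACE", "default",
		map[string]string{"WATCH_NAMESPACE": "from-map"}))
	// from-map
}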
+ +SPDX-License-Identifier: Apache-2.0 */ // Package executablehash detect the SHA256 of the running binary diff --git a/pkg/executablehash/executablehash_test.go b/pkg/executablehash/executablehash_test.go index b23d67c6de..fa3935d2e1 100644 --- a/pkg/executablehash/executablehash_test.go +++ b/pkg/executablehash/executablehash_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package executablehash @@ -34,14 +37,8 @@ var _ = Describe("Executable hash detection", func() { It("retrieves a hash from a given filename", func() { const expectedHash = "d6672ee3a93d0d6e3c30bdef89f310799c2f3ab781098a9792040d5541ce3ed3" const fileName = "test-hash" - var tempDir string - - DeferCleanup(func() { - Expect(os.RemoveAll(tempDir)).To(Succeed()) - }) - tempDir, err := os.MkdirTemp("", "test") - Expect(err).NotTo(HaveOccurred()) + tempDir := GinkgoT().TempDir() Expect(os.WriteFile(filepath.Join(tempDir, fileName), []byte(fileName), 0o600)).To(Succeed()) result, err := GetByName(filepath.Join(tempDir, fileName)) diff --git a/pkg/executablehash/suite_test.go b/pkg/executablehash/suite_test.go index 3263008eaf..b1d6919a48 100644 --- a/pkg/executablehash/suite_test.go +++ b/pkg/executablehash/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package executablehash diff --git a/pkg/management/client.go b/pkg/management/client.go index cd35e60485..cd13f351d2 100644 --- a/pkg/management/client.go +++ b/pkg/management/client.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package management contains all the features needed by the instance @@ -73,7 +76,7 @@ func NewControllerRuntimeClient() (client.WithWatch, error) { return nil, err } - mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{apiv1.GroupVersion}) + mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{apiv1.SchemeGroupVersion}) // add here any resource that need to be registered. 
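The executablehash test above swaps a hand-rolled os.MkdirTemp plus DeferCleanup pair for GinkgoT().TempDir(), which creates a per-spec directory and removes it automatically when the spec finishes. Sketch:

package executablehash_test

import (
	"os"
	"path/filepath"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = It("writes into an auto-cleaned temporary directory", func() {
	// No DeferCleanup(os.RemoveAll(...)) needed: Ginkgo deletes the
	// directory once this spec completes.
	dir := GinkgoT().TempDir()
	path := filepath.Join(dir, "test-hash")
	Expect(os.WriteFile(path, []byte("content"), 0o600)).To(Succeed())
})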
objectsToRegister := []runtime.Object{ // custom resources @@ -140,7 +143,14 @@ func WaitForGetCluster(ctx context.Context, clusterObjectKey client.ObjectKey) e return err } - err = retry.OnError(readinessCheckRetry, resources.RetryAlways, func() error { + return WaitForGetClusterWithClient(ctx, cli, clusterObjectKey) +} + +// WaitForGetClusterWithClient will wait for a successful get cluster to be executed +func WaitForGetClusterWithClient(ctx context.Context, cli client.Client, clusterObjectKey client.ObjectKey) error { + logger := log.FromContext(ctx).WithName("wait-for-get-cluster") + + err := retry.OnError(readinessCheckRetry, resources.RetryAlways, func() error { if err := cli.Get(ctx, clusterObjectKey, &apiv1.Cluster{}); err != nil { logger.Warning("Encountered an error while executing get cluster. Will wait and retry", "error", err.Error()) return err diff --git a/pkg/management/external/doc.go b/pkg/management/external/doc.go index 334b5f31e1..e2bcb842c6 100644 --- a/pkg/management/external/doc.go +++ b/pkg/management/external/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package external contains the functions needed to manage servers which are external to this diff --git a/pkg/management/external/external.go b/pkg/management/external/external.go index b6413c600d..24f5c658d2 100644 --- a/pkg/management/external/external.go +++ b/pkg/management/external/external.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package external diff --git a/pkg/management/external/internal/pgpass/conninfo.go b/pkg/management/external/internal/pgpass/conninfo.go index 7c6f451972..c6484877ef 100644 --- a/pkg/management/external/internal/pgpass/conninfo.go +++ b/pkg/management/external/internal/pgpass/conninfo.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
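Splitting WaitForGetClusterWithClient out of WaitForGetCluster lets code that already owns a controller-runtime client (including tests with a fake client) reuse the retry loop without building a second client. A hypothetical caller:

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/management"
)

// waitUntilClusterReadable blocks until a Get on the named Cluster succeeds,
// reusing whatever client the caller already has (namespace and name are
// placeholders).
func waitUntilClusterReadable(ctx context.Context, cli client.Client) error {
	key := client.ObjectKey{Namespace: "default", Name: "cluster-example"}
	return management.WaitForGetClusterWithClient(ctx, cli, key)
}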
+ +SPDX-License-Identifier: Apache-2.0 */ package pgpass diff --git a/pkg/management/external/internal/pgpass/conninfo_test.go b/pkg/management/external/internal/pgpass/conninfo_test.go index 4588707221..975b475597 100644 --- a/pkg/management/external/internal/pgpass/conninfo_test.go +++ b/pkg/management/external/internal/pgpass/conninfo_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgpass diff --git a/pkg/management/external/internal/pgpass/doc.go b/pkg/management/external/internal/pgpass/doc.go index bee39766a7..af2c86802e 100644 --- a/pkg/management/external/internal/pgpass/doc.go +++ b/pkg/management/external/internal/pgpass/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package pgpass allows the user to generate a PostgreSQL .pgpass file diff --git a/pkg/management/external/internal/pgpass/pgpass.go b/pkg/management/external/internal/pgpass/pgpass.go index 6f87a68b89..4ab49dd9a1 100644 --- a/pkg/management/external/internal/pgpass/pgpass.go +++ b/pkg/management/external/internal/pgpass/pgpass.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgpass diff --git a/pkg/management/external/internal/pgpass/pgpass_test.go b/pkg/management/external/internal/pgpass/pgpass_test.go index ec15759895..27fcb6da8c 100644 --- a/pkg/management/external/internal/pgpass/pgpass_test.go +++ b/pkg/management/external/internal/pgpass/pgpass_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgpass diff --git a/pkg/management/external/internal/pgpass/suite_test.go b/pkg/management/external/internal/pgpass/suite_test.go index 48bb92fff5..a09ba17025 100644 --- a/pkg/management/external/internal/pgpass/suite_test.go +++ b/pkg/management/external/internal/pgpass/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgpass diff --git a/pkg/management/external/utils.go b/pkg/management/external/utils.go index 959b2a2cfc..510fd27ed6 100644 --- a/pkg/management/external/utils.go +++ b/pkg/management/external/utils.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package external diff --git a/pkg/management/logtest/logtest.go b/pkg/management/logtest/logtest.go index 0a90638d5e..77b797e02a 100644 --- a/pkg/management/logtest/logtest.go +++ b/pkg/management/logtest/logtest.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package logtest contains the testing utils for the logging subsystem of the instance manager diff --git a/pkg/management/pgbouncer/config/config.go b/pkg/management/pgbouncer/config/config.go index 303c5c6d96..ba74f54082 100644 --- a/pkg/management/pgbouncer/config/config.go +++ b/pkg/management/pgbouncer/config/config.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package config diff --git a/pkg/management/pgbouncer/config/data.go b/pkg/management/pgbouncer/config/data.go index 64b861f39f..c0664f94ec 100644 --- a/pkg/management/pgbouncer/config/data.go +++ b/pkg/management/pgbouncer/config/data.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package config diff --git a/pkg/management/pgbouncer/config/secrets.go b/pkg/management/pgbouncer/config/secrets.go index f3185db95a..09b9312018 100644 --- a/pkg/management/pgbouncer/config/secrets.go +++ b/pkg/management/pgbouncer/config/secrets.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package config contains the code related to the generation of the PgBouncer configuration diff --git a/pkg/management/pgbouncer/config/secrets_test.go b/pkg/management/pgbouncer/config/secrets_test.go index 7be5e8d006..d34df9628c 100644 --- a/pkg/management/pgbouncer/config/secrets_test.go +++ b/pkg/management/pgbouncer/config/secrets_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package config diff --git a/pkg/management/pgbouncer/config/strings.go b/pkg/management/pgbouncer/config/strings.go index ef01497b89..30c350bf62 100644 --- a/pkg/management/pgbouncer/config/strings.go +++ b/pkg/management/pgbouncer/config/strings.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package config diff --git a/pkg/management/pgbouncer/config/strings_test.go b/pkg/management/pgbouncer/config/strings_test.go index d0b901276b..f6585584e3 100644 --- a/pkg/management/pgbouncer/config/strings_test.go +++ b/pkg/management/pgbouncer/config/strings_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package config diff --git a/pkg/management/pgbouncer/config/suite_test.go b/pkg/management/pgbouncer/config/suite_test.go index 08e6f7f809..bdea666cb3 100644 --- a/pkg/management/pgbouncer/config/suite_test.go +++ b/pkg/management/pgbouncer/config/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package config diff --git a/pkg/management/pgbouncer/metricsserver/lists.go b/pkg/management/pgbouncer/metricsserver/lists.go index 25a1ad678f..b0b66aa4b8 100644 --- a/pkg/management/pgbouncer/metricsserver/lists.go +++ b/pkg/management/pgbouncer/metricsserver/lists.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricsserver @@ -120,11 +123,13 @@ func NewShowListsMetrics(subsystem string) ShowListsMetrics { } func (e *Exporter) collectShowLists(ch chan<- prometheus.Metric, db *sql.DB) { + contextLogger := log.FromContext(e.ctx) + e.Metrics.ShowLists.Reset() // First, let's check the connection. No need to proceed if this fails. 
rows, err := db.Query("SHOW LISTS;") if err != nil { - log.Error(err, "Error while executing SHOW LISTS") + contextLogger.Error(err, "Error while executing SHOW LISTS") e.Metrics.PgbouncerUp.Set(0) e.Metrics.Error.Set(1) return @@ -135,7 +140,7 @@ func (e *Exporter) collectShowLists(ch chan<- prometheus.Metric, db *sql.DB) { defer func() { err = rows.Close() if err != nil { - log.Error(err, "while closing rows for SHOW LISTS") + contextLogger.Error(err, "while closing rows for SHOW LISTS") } }() @@ -146,14 +151,14 @@ func (e *Exporter) collectShowLists(ch chan<- prometheus.Metric, db *sql.DB) { for rows.Next() { if err = rows.Scan(&list, &item); err != nil { - log.Error(err, "Error while executing SHOW LISTS") + contextLogger.Error(err, "Error while executing SHOW LISTS") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } m, ok := e.Metrics.ShowLists[list] if !ok { e.Metrics.Error.Set(1) - log.Info("Missing metric", "query", "SHOW LISTS", "metric", list) + contextLogger.Info("Missing metric", "query", "SHOW LISTS", "metric", list) continue } m.Set(float64(item)) diff --git a/pkg/management/pgbouncer/metricsserver/metricsserver.go b/pkg/management/pgbouncer/metricsserver/metricsserver.go index 6245d6353c..cd37f5a565 100644 --- a/pkg/management/pgbouncer/metricsserver/metricsserver.go +++ b/pkg/management/pgbouncer/metricsserver/metricsserver.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package metricsserver contains the web server powering metrics @@ -45,10 +48,10 @@ var ( // Setup configure the web statusServer for a certain PostgreSQL instance, and // must be invoked before starting the real web statusServer -func Setup() error { +func Setup(ctx context.Context) error { // create the exporter and serve it on the /metrics endpoint registry = prometheus.NewRegistry() - exporter = NewExporter() + exporter = NewExporter(ctx) if err := registry.Register(exporter); err != nil { return fmt.Errorf("while registering PgBouncer exporters: %w", err) } diff --git a/pkg/management/pgbouncer/metricsserver/metricsserver_test.go b/pkg/management/pgbouncer/metricsserver/metricsserver_test.go index 2b27ec2945..726961e8b5 100644 --- a/pkg/management/pgbouncer/metricsserver/metricsserver_test.go +++ b/pkg/management/pgbouncer/metricsserver/metricsserver_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package metricsserver @@ -29,8 +32,8 @@ var _ = Describe("MetricsServer", func() { exporter = nil }) - It("should register exporters and collectors successfully", func() { - err := Setup() + It("should register exporters and collectors successfully", func(ctx SpecContext) { + err := Setup(ctx) Expect(err).NotTo(HaveOccurred()) mfs, err := registry.Gather() diff --git a/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go b/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go index ccf2860629..77e12bbe1a 100644 --- a/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go +++ b/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package metricsserver enables to expose a set of metrics and collectors on a given postgres instance package metricsserver import ( + "context" "database/sql" "fmt" "time" @@ -35,6 +39,7 @@ const PrometheusNamespace = "cnpg" // Exporter exports a set of metrics and collectors on a given postgres instance type Exporter struct { + ctx context.Context Metrics *metrics pool pool.Pooler } @@ -53,8 +58,9 @@ type metrics struct { } // NewExporter creates an exporter -func NewExporter() *Exporter { +func NewExporter(ctx context.Context) *Exporter { return &Exporter{ + ctx: ctx, Metrics: newMetrics(), } } @@ -122,6 +128,8 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { } func (e *Exporter) collectPgBouncerMetrics(ch chan<- prometheus.Metric) { + contextLogger := log.FromContext(e.ctx) + e.Metrics.CollectionsTotal.Inc() collectionStart := time.Now() defer func() { @@ -129,7 +137,7 @@ func (e *Exporter) collectPgBouncerMetrics(ch chan<- prometheus.Metric) { }() db, err := e.GetPgBouncerDB() if err != nil { - log.Error(err, "Error opening connection to PostgreSQL") + contextLogger.Error(err, "Error opening connection to PostgreSQL") e.Metrics.Error.Set(1) return } diff --git a/pkg/management/pgbouncer/metricsserver/pools.go b/pkg/management/pgbouncer/metricsserver/pools.go index 4913ab19a5..a77f789a81 100644 --- a/pkg/management/pgbouncer/metricsserver/pools.go +++ b/pkg/management/pgbouncer/metricsserver/pools.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
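Storing a context in the Exporter struct is unusual Go, but prometheus.Collector fixes the Collect signature, so the constructor has no other channel through which to deliver a context-scoped logger to collection time. The pattern in isolation, assuming the machinery log package that the surrounding code appears to use:

package example

import (
	"context"

	"github.com/cloudnative-pg/machinery/pkg/log"
	"github.com/prometheus/client_golang/prometheus"
)

// collector captures a context at construction time purely as a carrier
// for the contextual logger, since Collect cannot accept one.
type collector struct {
	ctx context.Context
}

var _ prometheus.Collector = &collector{}

func (c *collector) Describe(_ chan<- *prometheus.Desc) {}

func (c *collector) Collect(_ chan<- prometheus.Metric) {
	contextLogger := log.FromContext(c.ctx)
	contextLogger.Info("collection cycle started")
}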
+ +SPDX-License-Identifier: Apache-2.0 */ package metricsserver @@ -39,7 +42,8 @@ type ShowPoolsMetrics struct { SvLogin, MaxWait, MaxWaitUs, - PoolMode *prometheus.GaugeVec + PoolMode, + LoadBalanceHosts *prometheus.GaugeVec } // Describe produces the description for all the contained Metrics @@ -180,15 +184,23 @@ func NewShowPoolsMetrics(subsystem string) *ShowPoolsMetrics { Name: "pool_mode", Help: "The pooling mode in use. 1 for session, 2 for transaction, 3 for statement, -1 if unknown", }, []string{"database", "user"}), + LoadBalanceHosts: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "load_balance_hosts", + Help: "Number of hosts not load balancing between hosts", + }, []string{"database", "user"}), } } func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { + contextLogger := log.FromContext(e.ctx) + e.Metrics.ShowPools.Reset() // First, let's check the connection. No need to proceed if this fails. rows, err := db.Query("SHOW POOLS;") if err != nil { - log.Error(err, "Error while executing SHOW POOLS") + contextLogger.Error(err, "Error while executing SHOW POOLS") e.Metrics.PgbouncerUp.Set(0) e.Metrics.Error.Set(1) return @@ -199,7 +211,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { defer func() { err = rows.Close() if err != nil { - log.Error(err, "while closing rows for SHOW POOLS") + contextLogger.Error(err, "while closing rows for SHOW POOLS") } }() @@ -231,16 +243,24 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { svActiveCancel int svBeingCanceled int ) + // PGBouncer 1.24.0 or above + var ( + loadBalanceHosts sql.NullInt32 + ) cols, err := rows.Columns() if err != nil { - log.Error(err, "Error while getting number of columns") + contextLogger.Error(err, "Error while getting number of columns") e.Metrics.PgbouncerUp.Set(0) e.Metrics.Error.Set(1) return } for rows.Next() { - const poolsColumnsPgBouncer1180 = 16 + const ( + poolsColumnsPgBouncer1180 = 16 + poolsColumnsPgBouncer1240 = 17 + ) + switch len(cols) { case poolsColumnsPgBouncer1180: if err = rows.Scan(&database, &user, @@ -259,7 +279,29 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { &maxWaitUs, &poolMode, ); err != nil { - log.Error(err, "Error while executing SHOW POOLS") + contextLogger.Error(err, "Error while executing SHOW POOLS") + e.Metrics.Error.Set(1) + e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() + } + case poolsColumnsPgBouncer1240: + if err = rows.Scan(&database, &user, + &clActive, + &clWaiting, + &clActiveCancelReq, + &clWaitingCancelReq, + &svActive, + &svActiveCancel, + &svBeingCanceled, + &svIdle, + &svUsed, + &svTested, + &svLogin, + &maxWait, + &maxWaitUs, + &poolMode, + &loadBalanceHosts, + ); err != nil { + contextLogger.Error(err, "Error while executing SHOW POOLS") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } @@ -277,7 +319,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { &maxWaitUs, &poolMode, ); err != nil { - log.Error(err, "Error while executing SHOW POOLS") + contextLogger.Error(err, "Error while executing SHOW POOLS") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } @@ -297,6 +339,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.ShowPools.MaxWait.WithLabelValues(database, user).Set(float64(maxWait)) 
e.Metrics.ShowPools.MaxWaitUs.WithLabelValues(database, user).Set(float64(maxWaitUs)) e.Metrics.ShowPools.PoolMode.WithLabelValues(database, user).Set(float64(poolModeToInt(poolMode))) + e.Metrics.ShowPools.LoadBalanceHosts.WithLabelValues(database, user).Set(float64(loadBalanceHosts.Int32)) } e.Metrics.ShowPools.ClActive.Collect(ch) @@ -314,6 +357,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.ShowPools.MaxWait.Collect(ch) e.Metrics.ShowPools.MaxWaitUs.Collect(ch) e.Metrics.ShowPools.PoolMode.Collect(ch) + e.Metrics.ShowPools.LoadBalanceHosts.Collect(ch) if err = rows.Err(); err != nil { e.Metrics.Error.Set(1) diff --git a/pkg/management/pgbouncer/metricsserver/pools_test.go b/pkg/management/pgbouncer/metricsserver/pools_test.go index 117a219b41..f96f318d63 100644 --- a/pkg/management/pgbouncer/metricsserver/pools_test.go +++ b/pkg/management/pgbouncer/metricsserver/pools_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricsserver @@ -53,7 +56,7 @@ var _ = Describe("Exporter", func() { } ) - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { var err error db, mock, err = sqlmock.New() Expect(err).ShouldNot(HaveOccurred()) @@ -61,6 +64,7 @@ var _ = Describe("Exporter", func() { exp = &Exporter{ Metrics: newMetrics(), pool: fakePooler{db: db}, + ctx: ctx, } registry = prometheus.NewRegistry() diff --git a/pkg/management/pgbouncer/metricsserver/stats.go b/pkg/management/pgbouncer/metricsserver/stats.go index ebd19b6a1f..2c46ff72e1 100644 --- a/pkg/management/pgbouncer/metricsserver/stats.go +++ b/pkg/management/pgbouncer/metricsserver/stats.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
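SHOW POOLS grows a seventeenth column (load_balance_hosts) in PgBouncer 1.24.0, so collectShowPools above branches on the column count and scans the extra column into a sql.NullInt32, which reads back as zero against older servers. The same version-tolerant scanning pattern in isolation, with hypothetical row shapes:

package example

import (
	"database/sql"
	"fmt"
)

// scanVersionTolerant counts the result columns once, then picks a Scan
// target list that matches; optional newer columns land in sql.Null* types
// so absent values read back as their zero value.
func scanVersionTolerant(rows *sql.Rows) error {
	cols, err := rows.Columns()
	if err != nil {
		return err
	}

	var name string
	var active int
	var loadBalanceHosts sql.NullInt32 // only served by newer versions

	switch len(cols) {
	case 2: // hypothetical older layout
		err = rows.Scan(&name, &active)
	case 3: // hypothetical newer layout with the extra trailing column
		err = rows.Scan(&name, &active, &loadBalanceHosts)
	default:
		return fmt.Errorf("unexpected column count %d", len(cols))
	}
	if err != nil {
		return err
	}

	fmt.Println(name, active, loadBalanceHosts.Int32) // Int32 is 0 when absent
	return nil
}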
+ +SPDX-License-Identifier: Apache-2.0 */ package metricsserver @@ -25,7 +28,10 @@ import ( // ShowStatsMetrics contains all the SHOW STATS Metrics type ShowStatsMetrics struct { - TotalServerAssigCount, + TotalBindCount, + TotalClientParseCount, + TotalServerAssignCount, + TotalServerParseCount, TotalXactCount, TotalQueryCount, TotalReceived, @@ -33,7 +39,10 @@ type ShowStatsMetrics struct { TotalXactTime, TotalQueryTime, TotalWaitTime, - AvgServerAssigCount, + AvgBindCount, + AvgClientParseCount, + AvgServerAssignCount, + AvgServerParseCount, AvgXactCount, AvgQueryCount, AvgRecv, @@ -45,7 +54,10 @@ // Describe produces the description for all the contained Metrics func (r *ShowStatsMetrics) Describe(ch chan<- *prometheus.Desc) { - r.TotalServerAssigCount.Describe(ch) + r.TotalBindCount.Describe(ch) + r.TotalClientParseCount.Describe(ch) + r.TotalServerAssignCount.Describe(ch) + r.TotalServerParseCount.Describe(ch) r.TotalXactCount.Describe(ch) r.TotalQueryCount.Describe(ch) r.TotalReceived.Describe(ch) @@ -53,7 +65,10 @@ func (r *ShowStatsMetrics) Describe(ch chan<- *prometheus.Desc) { r.TotalXactTime.Describe(ch) r.TotalQueryTime.Describe(ch) r.TotalWaitTime.Describe(ch) - r.AvgServerAssigCount.Describe(ch) + r.AvgBindCount.Describe(ch) + r.AvgClientParseCount.Describe(ch) + r.AvgServerAssignCount.Describe(ch) + r.AvgServerParseCount.Describe(ch) r.AvgXactCount.Describe(ch) r.AvgQueryCount.Describe(ch) r.AvgRecv.Describe(ch) @@ -65,7 +80,10 @@ func (r *ShowStatsMetrics) Describe(ch chan<- *prometheus.Desc) { // Reset resets all the contained Metrics func (r *ShowStatsMetrics) Reset() { - r.AvgServerAssigCount.Reset() + r.TotalBindCount.Reset() + r.TotalClientParseCount.Reset() + r.TotalServerAssignCount.Reset() + r.TotalServerParseCount.Reset() r.TotalXactCount.Reset() r.TotalQueryCount.Reset() r.TotalReceived.Reset() @@ -73,7 +91,10 @@ func (r *ShowStatsMetrics) Reset() { r.TotalXactTime.Reset() r.TotalQueryTime.Reset() r.TotalWaitTime.Reset() - r.AvgServerAssigCount.Reset() + r.AvgBindCount.Reset() + r.AvgClientParseCount.Reset() + r.AvgServerAssignCount.Reset() + r.AvgServerParseCount.Reset() r.AvgXactCount.Reset() r.AvgQueryCount.Reset() r.AvgRecv.Reset() @@ -87,12 +108,31 @@ func (r *ShowStatsMetrics) Reset() { func NewShowStatsMetrics(subsystem string) *ShowStatsMetrics { subsystem += "_stats" return &ShowStatsMetrics{ - TotalServerAssigCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + TotalBindCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "total_bind_count", + Help: "Total number of prepared statements readied for execution by clients and forwarded to " + + "PostgreSQL by pgbouncer", + }, []string{"database"}), + TotalClientParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "total_client_parse_count", + Help: "Total number of prepared statements created by clients.", + }, []string{"database"}), + TotalServerAssignCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "total_server_assignment_count", Help: "Total time a server was assigned to a client.", }, []string{"database"}), + TotalServerParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "total_server_parse_count", + Help: "Total number of prepared statements created by pgbouncer on a server.", },
[]string{"database"}), TotalXactCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, @@ -137,13 +177,32 @@ func NewShowStatsMetrics(subsystem string) *ShowStatsMetrics { Name: "total_wait_time", Help: "Time spent by clients waiting for a server, in microseconds.", }, []string{"database"}), - AvgServerAssigCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + AvgBindCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "avg_bind_count", + Help: "Average number of prepared statements readied for execution by clients and forwarded to " + + "PostgreSQL by pgbouncer.", + }, []string{"database"}), + AvgClientParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "avg_client_parse_count", + Help: "Average number of prepared statements created by clients.", + }, []string{"database"}), + AvgServerAssignCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "avg_server_assignment_count", Help: "Average number of times a server was assigned to a client per second in " + "the last stat period.", }, []string{"database"}), + AvgServerParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "avg_server_parse_count", + Help: "Average number of prepared statements created by pgbouncer on a server.", + }, []string{"database"}), AvgXactCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, @@ -190,11 +249,13 @@ func NewShowStatsMetrics(subsystem string) *ShowStatsMetrics { } func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { + contextLogger := log.FromContext(e.ctx) + e.Metrics.ShowStats.Reset() // First, let's check the connection. No need to proceed if this fails. 
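The collector below never asks PgBouncer for its version directly: it infers the SHOW STATS layout from the number of columns the command returns. A minimal, self-contained sketch of that dispatch, assuming the column counts implied by the scan branches (15 columns before PgBouncer 1.23, 17 once the server assignment counters appeared, 23 with the 1.24 prepared-statement counters):

package main

import "fmt"

type statsLayout int

const (
	layoutLegacy             statsLayout = iota // pre-1.23: no server assignment counters
	layoutAssignments                           // 1.23: adds total/avg_server_assignment_count
	layoutPreparedStatements                    // 1.24+: adds parse and bind counters
)

// detectLayout mirrors the column-count switch used by collectShowStats.
func detectLayout(colCount int) statsLayout {
	switch {
	case colCount < 16:
		return layoutLegacy
	case colCount == 17:
		return layoutAssignments
	default:
		return layoutPreparedStatements
	}
}

func main() {
	for _, n := range []int{15, 17, 23} {
		fmt.Printf("%d columns -> layout %d\n", n, detectLayout(n))
	}
}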
rows, err := db.Query("SHOW STATS;") if err != nil { - log.Error(err, "Error while executing SHOW STATS") + contextLogger.Error(err, "Error while executing SHOW STATS") e.Metrics.PgbouncerUp.Set(0) e.Metrics.Error.Set(1) return @@ -205,7 +266,7 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { defer func() { err = rows.Close() if err != nil { - log.Error(err, "while closing rows for SHOW STATS") + contextLogger.Error(err, "while closing rows for SHOW STATS") } }() var ( @@ -228,13 +289,22 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { // PGBouncer >= 1.23.0 var ( - totalServerAssigCount, - avgServerAssigCount int + totalServerAssignCount, + avgServerAssignCount int ) + // PGBouncer >= 1.24.0 + var ( + totalClientParseCount, + totalServerParseCount, + totalBindCount, + avgClientParseCount, + avgServerParseCount, + avgBindCount int + ) statCols, err := rows.Columns() if err != nil { - log.Error(err, "Error while reading SHOW STATS") + contextLogger.Error(err, "Error while reading SHOW STATS") return } @@ -242,7 +312,8 @@ for rows.Next() { var err error - if statColsCount < 16 { + switch { + case statColsCount < 16: err = rows.Scan(&database, &totalXactCount, &totalQueryCount, @@ -259,9 +330,28 @@ &avgQueryTime, &avgWaitTime, ) - } else { + case statColsCount == 17: + err = rows.Scan(&database, + &totalServerAssignCount, + &totalXactCount, + &totalQueryCount, + &totalReceived, + &totalSent, + &totalXactTime, + &totalQueryTime, + &totalWaitTime, + &avgServerAssignCount, + &avgXactCount, + &avgQueryCount, + &avgRecv, + &avgSent, + &avgXactTime, + &avgQueryTime, + &avgWaitTime, + ) + default: err = rows.Scan(&database, - &totalServerAssigCount, + &totalServerAssignCount, &totalXactCount, &totalQueryCount, &totalReceived, @@ -269,7 +359,10 @@ &totalXactTime, &totalQueryTime, &totalWaitTime, - &avgServerAssigCount, + &totalClientParseCount, + &totalServerParseCount, + &totalBindCount, + &avgServerAssignCount, &avgXactCount, &avgQueryCount, &avgRecv, @@ -277,10 +370,13 @@ &avgXactTime, &avgQueryTime, &avgWaitTime, + &avgClientParseCount, + &avgServerParseCount, + &avgBindCount, ) } if err != nil { - log.Error(err, "Error while executing SHOW STATS") + contextLogger.Error(err, "Error while executing SHOW STATS") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } @@ -300,19 +396,28 @@ e.Metrics.ShowStats.AvgQueryTime.WithLabelValues(database).Set(float64(avgQueryTime)) e.Metrics.ShowStats.AvgWaitTime.WithLabelValues(database).Set(float64(avgWaitTime)) - if statColsCount >= 16 { - e.Metrics.ShowStats.TotalServerAssigCount.WithLabelValues(database).Set( - float64(totalServerAssigCount)) - e.Metrics.ShowStats.AvgServerAssigCount.WithLabelValues(database).Set( - float64(avgServerAssigCount)) + if statColsCount >= 17 { + e.Metrics.ShowStats.TotalServerAssignCount.WithLabelValues(database).Set( + float64(totalServerAssignCount)) + e.Metrics.ShowStats.AvgServerAssignCount.WithLabelValues(database).Set( + float64(avgServerAssignCount)) + } + if statColsCount > 17 { + 
e.Metrics.ShowStats.TotalClientParseCount.WithLabelValues(database).Set( + float64(totalClientParseCount)) + e.Metrics.ShowStats.TotalServerParseCount.WithLabelValues(database).Set( + float64(totalServerParseCount)) + e.Metrics.ShowStats.TotalBindCount.WithLabelValues(database).Set( + float64(totalBindCount)) + e.Metrics.ShowStats.AvgClientParseCount.WithLabelValues(database).Set( + float64(avgClientParseCount)) + e.Metrics.ShowStats.AvgServerParseCount.WithLabelValues(database).Set( + float64(avgServerParseCount)) + e.Metrics.ShowStats.AvgBindCount.WithLabelValues(database).Set( + float64(avgBindCount)) } } - if statColsCount >= 16 { - e.Metrics.ShowStats.TotalServerAssigCount.Collect(ch) - e.Metrics.ShowStats.AvgServerAssigCount.Collect(ch) - } - e.Metrics.ShowStats.TotalXactCount.Collect(ch) e.Metrics.ShowStats.TotalQueryCount.Collect(ch) e.Metrics.ShowStats.TotalReceived.Collect(ch) @@ -328,6 +433,19 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.ShowStats.AvgQueryTime.Collect(ch) e.Metrics.ShowStats.AvgWaitTime.Collect(ch) + if statColsCount >= 17 { + e.Metrics.ShowStats.TotalServerAssignCount.Collect(ch) + e.Metrics.ShowStats.AvgServerAssignCount.Collect(ch) + } + if statColsCount > 17 { + e.Metrics.ShowStats.TotalClientParseCount.Collect(ch) + e.Metrics.ShowStats.TotalServerParseCount.Collect(ch) + e.Metrics.ShowStats.TotalBindCount.Collect(ch) + e.Metrics.ShowStats.AvgClientParseCount.Collect(ch) + e.Metrics.ShowStats.AvgServerParseCount.Collect(ch) + e.Metrics.ShowStats.AvgBindCount.Collect(ch) + } + if err = rows.Err(); err != nil { e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() diff --git a/pkg/management/pgbouncer/metricsserver/stats_test.go b/pkg/management/pgbouncer/metricsserver/stats_test.go index 1094cccd72..4afa31d1ad 100644 --- a/pkg/management/pgbouncer/metricsserver/stats_test.go +++ b/pkg/management/pgbouncer/metricsserver/stats_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricsserver @@ -40,7 +43,7 @@ var _ = Describe("MetricsServer", func() { ch chan prometheus.Metric ) - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { var err error db, mock, err = sqlmock.New() Expect(err).NotTo(HaveOccurred()) @@ -48,6 +51,7 @@ var _ = Describe("MetricsServer", func() { exp = &Exporter{ Metrics: newMetrics(), pool: fakePooler{db: db}, + ctx: ctx, } registry = prometheus.NewRegistry() registry.MustRegister(exp.Metrics.Error) diff --git a/pkg/management/pgbouncer/metricsserver/suite_test.go b/pkg/management/pgbouncer/metricsserver/suite_test.go index ab7aa16d64..7038b67d87 100644 --- a/pkg/management/pgbouncer/metricsserver/suite_test.go +++ b/pkg/management/pgbouncer/metricsserver/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricsserver diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go new file mode 100644 index 0000000000..d9750949de --- /dev/null +++ b/pkg/management/postgres/archiver/archiver.go @@ -0,0 +1,355 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package archiver + +import ( + "context" + "errors" + "fmt" + "math" + "path" + "path/filepath" + "time" + + barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver" + "github.com/cloudnative-pg/machinery/pkg/fileutils" + walUtils "github.com/cloudnative-pg/machinery/pkg/fileutils/wals" + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" + "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// errSwitchoverInProgress is raised when there is a switchover in progress +// and the new primary has not completed the promotion +var errSwitchoverInProgress = fmt.Errorf("switchover in progress, refusing archiving") + +// ErrMissingWALArchiverPlugin is raised when we try to archive a WAL +// file with a CNPG-i plugin whose socket does not exist. +type ErrMissingWALArchiverPlugin struct { + // PluginName is the name of the plugin that is missing + PluginName string +} + +// Error implements the error interface +func (e ErrMissingWALArchiverPlugin) Error() string { + return fmt.Sprintf("wal archive plugin is not available: %s", e.PluginName) +} + +// ArchiveAllReadyWALs ensures that all WAL files that are in the "ready" +// queue have been archived. +// This is used to ensure that a former primary will archive the WAL files in +// its queue even in case of an unclean shutdown.
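ArchiveAllReadyWALs, defined next, drains the ready queue in batches and stops when a sentinel error reports that nothing is left. A hedged sketch of that loop shape, with a stub standing in for the real archiving step:

package main

import (
	"errors"
	"fmt"
)

// errNoWALLeft plays the role of the noWALLeft sentinel used below.
var errNoWALLeft = errors.New("no wal files to archive")

// drain repeatedly invokes step until the sentinel reports an empty queue.
func drain(step func() error) error {
	for {
		if err := step(); err != nil {
			if errors.Is(err, errNoWALLeft) {
				return nil // queue fully drained
			}
			return err
		}
	}
}

func main() {
	remaining := 3 // stand-in for batches of ready WAL files
	err := drain(func() error {
		if remaining == 0 {
			return errNoWALLeft
		}
		fmt.Println("archiving one batch, batches left:", remaining)
		remaining--
		return nil
	})
	fmt.Println("drained:", err)
}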
+func ArchiveAllReadyWALs( + ctx context.Context, + cluster *apiv1.Cluster, + pgData string, +) error { + contextLog := log.FromContext(ctx) + + noWALLeft := errors.New("no wal files to archive") + + iterator := func() error { + walList := walUtils.GatherReadyWALFiles( + ctx, walUtils.GatherReadyWALFilesConfig{ + MaxResults: math.MaxInt32 - 1, + PgDataPath: pgData, + }, + ) + + if len(walList.Ready) > 0 { + contextLog.Info( + "Detected ready WAL files in a former primary, triggering WAL archiving", + "readyWALCount", len(walList.Ready), + ) + contextLog.Debug( + "List of ready WALs", + "readyWALs", walList.Ready, + ) + } + + for _, wal := range walList.ReadyItemsToSlice() { + if err := internalRun(ctx, pgData, cluster, wal); err != nil { + return err + } + + if err := walList.MarkAsDone(ctx, wal); err != nil { + return err + } + } + + if !walList.HasMoreResults { + return noWALLeft + } + + return nil + } + + for { + if err := iterator(); err != nil { + if errors.Is(err, noWALLeft) { + return nil + } + return err + } + } +} + +// Run implements the WAL archiving process given the current cluster definition +// and the current pod name. +func Run( + ctx context.Context, + podName, pgData string, + cluster *apiv1.Cluster, + walName string, +) error { + contextLog := log.FromContext(ctx) + + if cluster.IsReplica() { + if podName != cluster.Status.CurrentPrimary && podName != cluster.Status.TargetPrimary { + contextLog.Debug("WAL archiving on a replica cluster, "+ + "but this node is neither the target primary nor the current one. "+ + "Skipping WAL archiving", + "walName", walName, + "currentPrimary", cluster.Status.CurrentPrimary, + "targetPrimary", cluster.Status.TargetPrimary, + ) + return nil + } + } + + if cluster.Status.CurrentPrimary != podName { + contextLog.Info("Refusing to archive WAL when there is a switchover in progress", + "currentPrimary", cluster.Status.CurrentPrimary, + "targetPrimary", cluster.Status.TargetPrimary, + "podName", podName) + return errSwitchoverInProgress + } + + return internalRun(ctx, pgData, cluster, walName) +} + +func internalRun( + ctx context.Context, + pgData string, + cluster *apiv1.Cluster, + walName string, +) error { + contextLog := log.FromContext(ctx) + startTime := time.Now() + + // We allow plugins to archive WALs even if there is no plugin + // directly enabled by the user, to retain compatibility with + // the old API. + if err := archiveWALViaPlugins(ctx, cluster, pgData, walName); err != nil { + return err + } + + // If the user has chosen a plugin to do WAL archiving, we don't + // trigger the legacy archiving process.
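The comment above describes the dispatch that follows: plugins are always offered the WAL first, and the in-tree Barman Cloud path runs only when no archiving plugin is explicitly configured. A rough sketch of that decision flow; the cluster type and plugin name used here are illustrative, not the operator's real API:

package main

import "fmt"

// cluster is a hypothetical stand-in for the real Cluster API type.
type cluster struct {
	walArchivePlugin string // empty when no archiving plugin is configured
}

func archive(c cluster, wal string) error {
	// Plugins always get a chance first, for compatibility with the old API.
	if err := archiveViaPlugins(c, wal); err != nil {
		return err
	}
	if c.walArchivePlugin != "" {
		return nil // a plugin owns archiving: skip the legacy path
	}
	return archiveViaBarman(wal)
}

func archiveViaPlugins(_ cluster, wal string) error {
	fmt.Println("offered to plugins:", wal)
	return nil
}

func archiveViaBarman(wal string) error {
	fmt.Println("barman-cloud-wal-archive:", wal)
	return nil
}

func main() {
	_ = archive(cluster{}, "000000010000000000000001")
	_ = archive(cluster{walArchivePlugin: "example-archiver"}, "000000010000000000000002")
}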
+ if cluster.GetEnabledWALArchivePluginName() != "" { + return nil + } + + // Request Barman Cloud to archive this WAL + if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { + // Backup not configured, skipping WAL + contextLog.Debug("Backup not configured, skip WAL archiving via Barman Cloud", + "walName", walName, + "currentPrimary", cluster.Status.CurrentPrimary, + "targetPrimary", cluster.Status.TargetPrimary, + ) + return nil + } + + // Get environment from cache + env, err := local.NewClient().Cache().GetEnv(cache.WALArchiveKey) + if err != nil { + return fmt.Errorf("failed to get envs: %w", err) + } + + // Create the archiver + var walArchiver *barmanArchiver.WALArchiver + if walArchiver, err = barmanArchiver.New( + ctx, + env, + postgres.SpoolDirectory, + pgData, + path.Join(pgData, constants.CheckEmptyWalArchiveFile)); err != nil { + return fmt.Errorf("while creating the archiver: %w", err) + } + + // Step 1: check if the archive location is safe to perform archiving + if utils.IsEmptyWalArchiveCheckEnabled(&cluster.ObjectMeta) { + if err := checkWalArchive(ctx, cluster, walArchiver, pgData); err != nil { + return err + } + } + + // Step 2: check whether this WAL file has already been archived + var isDeletedFromSpool bool + isDeletedFromSpool, err = walArchiver.DeleteFromSpool(walName) + if err != nil { + return fmt.Errorf("while testing the existence of the WAL file in the spool directory: %w", err) + } + if isDeletedFromSpool { + contextLog.Info("WAL file already archived, skipping", + "walName", walName, + "currentPrimary", cluster.Status.CurrentPrimary, + "targetPrimary", cluster.Status.TargetPrimary) + return nil + } + + // Step 3: gather the names of the WAL files to archive + walFilesList := walUtils.GatherReadyWALFiles( + ctx, + walUtils.GatherReadyWALFilesConfig{ + MaxResults: getMaxResult(cluster), + SkipWALs: []string{walName}, + PgDataPath: pgData, + }, + ) + + // Ensure the requested WAL file is always the first one being + // archived + walFilesList.Ready = append([]string{walName}, walFilesList.Ready...) + contextLog.Debug("WAL files to archive", "walFilesListReady", walFilesList.Ready) + + // Step 4: gather the barman-cloud-wal-archive options + options, err := walArchiver.BarmanCloudWalArchiveOptions( + ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) + if err != nil { + return err + } + + // Step 5: archive the WAL files in parallel + uploadStartTime := time.Now() + walStatus := walArchiver.ArchiveList(ctx, walFilesList.ReadyItemsToSlice(), options) + if len(walStatus) > 1 { + contextLog.Info("Completed archive command (parallel)", + "walsCount", len(walStatus), + "startTime", startTime, + "uploadStartTime", uploadStartTime, + "uploadTotalTime", time.Since(uploadStartTime), + "totalTime", time.Since(startTime)) + } + + // We return only the first error to PostgreSQL, because the first error + // is the one raised by the file that PostgreSQL has requested to archive. + // The other errors are related to WAL files that were pre-archived as + // a performance optimization and are just logged. + return walStatus[0].Err +} + +func getMaxResult(cluster *apiv1.Cluster) int { + if cluster.Spec.Backup.BarmanObjectStore.Wal != nil && cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel > 0 { + return cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel - 1 + } + return 0 +} + +// archiveWALViaPlugins requests every capable plugin to archive the passed +// WAL file, and returns an error if a configured plugin fails to do so.
+// It will not return an error if there's no plugin capable of WAL archiving. +func archiveWALViaPlugins( + ctx context.Context, + cluster *apiv1.Cluster, + pgData string, + walName string, +) error { + contextLogger := log.FromContext(ctx) + + // check if the `walName` is an absolute path or just the filename + if !filepath.IsAbs(walName) { + walName = filepath.Join(pgData, walName) + } + + plugins := repository.New() + defer plugins.Close() + + enabledPluginNamesSet := stringset.From(apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)) + + client, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) + if err != nil { + contextLogger.Error(err, "Error while loading required plugins") + return err + } + defer client.Close(ctx) + + enabledArchiverPluginName := cluster.GetEnabledWALArchivePluginName() + if enabledArchiverPluginName != "" && !client.HasPlugin(enabledArchiverPluginName) { + return ErrMissingWALArchiverPlugin{ + PluginName: enabledArchiverPluginName, + } + } + + return client.ArchiveWAL(ctx, cluster, walName) +} + +// isCheckWalArchiveFlagFilePresent returns true if the file CheckEmptyWalArchiveFile is present in the PGDATA directory +func isCheckWalArchiveFlagFilePresent(ctx context.Context, pgDataDirectory string) bool { + contextLogger := log.FromContext(ctx) + filePath := filepath.Join(pgDataDirectory, constants.CheckEmptyWalArchiveFile) + + exists, err := fileutils.FileExists(filePath) + if err != nil { + contextLogger.Error(err, "error while checking for the existence of the CheckEmptyWalArchiveFile") + } + // If the check-empty-WAL-archive flag file doesn't exist, this is a no-op + if !exists { + contextLogger.Debug("WAL check flag file not found, skipping check") + return false + } + + return exists +} + +func checkWalArchive( + ctx context.Context, + cluster *apiv1.Cluster, + walArchiver *barmanArchiver.WALArchiver, + pgData string, +) error { + contextLogger := log.FromContext(ctx) + checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions( + ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) + if err != nil { + contextLogger.Error(err, "while getting barman-cloud-wal-archive options") + return err + } + + if !isCheckWalArchiveFlagFilePresent(ctx, pgData) { + return nil + } + + if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil { + contextLogger.Error(err, "while barman-cloud-check-wal-archive") + return err + } + + return nil +} diff --git a/pkg/management/postgres/archiver/doc.go b/pkg/management/postgres/archiver/doc.go new file mode 100644 index 0000000000..6b40b65e25 --- /dev/null +++ b/pkg/management/postgres/archiver/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package archiver contains the logic of the CloudNativePG WAL archiver +package archiver diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go index f50fbd477a..292d670e41 100644 --- a/pkg/management/postgres/backup.go +++ b/pkg/management/postgres/backup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -25,13 +28,13 @@ import ( "time" barmanBackup "github.com/cloudnative-pg/barman-cloud/pkg/backup" - barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities" barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog" barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" - "k8s.io/apimachinery/pkg/api/meta" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -40,10 +43,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" // this is needed to correctly open the sql connection with the pgx driver _ "github.com/jackc/pgx/v5/stdlib" @@ -64,7 +66,6 @@ type BackupCommand struct { Env []string Log log.Logger Instance *Instance - Capabilities *barmanCapabilities.Capabilities barmanBackup *barmanBackup.Command } @@ -78,11 +79,6 @@ func NewBarmanBackupCommand( instance *Instance, log log.Logger, ) (*BackupCommand, error) { - capabilities, err := barmanCapabilities.CurrentCapabilities() - if err != nil { - return nil, err - } - return &BackupCommand{ Cluster: cluster, Backup: backup, @@ -91,17 +87,14 @@ func NewBarmanBackupCommand( Env: os.Environ(), Instance: instance, Log: log, - Capabilities: capabilities, - barmanBackup: barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, capabilities), + barmanBackup: barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore), }, nil } // Start initiates a backup for this instance using // barman-cloud-backup func (b *BackupCommand) Start(ctx context.Context) error { - if err := b.ensureCompatibility(); err != nil { - return err - } + contextLogger := log.FromContext(ctx) b.setupBackupStatus() @@ -111,7 +104,7 @@ func (b *BackupCommand) Start(ctx context.Context) error { } if err := ensureWalArchiveIsWorking(b.Instance); err != nil { - log.Warning("WAL archiving is not working", "err", err) + contextLogger.Warning("WAL archiving is not working", "err", err) 
b.Backup.GetStatus().Phase = apiv1.BackupPhaseWalArchivingFailing return PatchBackupStatusAndRetry(ctx, b.Client, b.Backup) } @@ -120,7 +113,7 @@ func (b *BackupCommand) Start(ctx context.Context) error { b.Backup.GetStatus().Phase = apiv1.BackupPhaseRunning err := PatchBackupStatusAndRetry(ctx, b.Client, b.Backup) if err != nil { - log.Error(err, "can't set backup as WAL archiving failing") + contextLogger.Error(err, "can't set backup as WAL archiving failing") } } @@ -140,15 +133,6 @@ func (b *BackupCommand) Start(ctx context.Context) error { return nil } -func (b *BackupCommand) ensureCompatibility() error { - postgresVers, err := b.Instance.GetPgVersion() - if err != nil { - return err - } - - return b.barmanBackup.IsCompatible(postgresVers) -} - func (b *BackupCommand) retryWithRefreshedCluster( ctx context.Context, cb func() error, @@ -170,33 +154,11 @@ func (b *BackupCommand) run(ctx context.Context) { ) if err := b.takeBackup(ctx); err != nil { - backupStatus := b.Backup.GetStatus() - // record the failure b.Log.Error(err, "Backup failed") b.Recorder.Event(b.Backup, "Normal", "Failed", "Backup failed") - // update backup status as failed - backupStatus.SetAsFailed(err) - if err := PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil { - b.Log.Error(err, "Can't mark backup as failed") - // We do not terminate here because we still want to do the maintenance - // activity on the backups and to set the condition on the cluster. - } - - // add backup failed condition to the cluster - if failErr := b.retryWithRefreshedCluster(ctx, func() error { - origCluster := b.Cluster.DeepCopy() - - meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(err)) - - b.Cluster.Status.LastFailedBackup = utils.GetCurrentTimestampWithFormat(time.RFC3339) - return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) - }); failErr != nil { - b.Log.Error(failErr, "while setting cluster condition for failed backup") - // We do not terminate here because it's more important to properly handle - // the backup maintenance activity than putting a condition in the cluster - } + _ = status.FlagBackupAsFailed(ctx, b.Client, b.Backup, b.Cluster, err) } b.backupMaintenance(ctx) @@ -209,8 +171,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { // Update backup status in cluster conditions on startup if err := b.retryWithRefreshedCluster(ctx, func() error { - // TODO: this condition is set only here, never removed or handled? 
- return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) + return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) }); err != nil { b.Log.Error(err, "Error changing backup condition (backup started)") // We do not terminate here because we could still have a good backup @@ -227,7 +188,6 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { b.Backup.Status.BackupName, backupStatus.ServerName, b.Env, - b.Cluster, postgres.BackupTemporaryDirectory, ) if err != nil { @@ -242,7 +202,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { b.Backup.Status.SetAsCompleted() barmanBackup, err := b.barmanBackup.GetExecutedBackupInfo( - ctx, b.Backup.Status.BackupName, backupStatus.ServerName, b.Cluster, b.Env) + ctx, b.Backup.Status.BackupName, backupStatus.ServerName, b.Env) if err != nil { return err } @@ -256,7 +216,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { // Update backup status in cluster conditions on backup completion if err := b.retryWithRefreshedCluster(ctx, func() error { - return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) + return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) }); err != nil { b.Log.Error(err, "Can't update the cluster with the completed backup data") } @@ -303,7 +263,7 @@ func (b *BackupCommand) backupMaintenance(ctx context.Context) { data.GetLastSuccessfulBackupTime(), ) - if reflect.DeepEqual(origCluster.Status, b.Cluster.Status) { + if equality.Semantic.DeepEqual(origCluster.Status, b.Cluster.Status) { return nil } return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) @@ -341,9 +301,7 @@ func (b *BackupCommand) setupBackupStatus() { barmanConfiguration := b.Cluster.Spec.Backup.BarmanObjectStore backupStatus := b.Backup.GetStatus() - if b.Capabilities.ShouldExecuteBackupWithName(b.Cluster) { - backupStatus.BackupName = fmt.Sprintf("backup-%v", utils.ToCompactISO8601(time.Now())) - } + backupStatus.BackupName = fmt.Sprintf("backup-%v", pgTime.ToCompactISO8601(time.Now())) backupStatus.BarmanCredentials = barmanConfiguration.BarmanCredentials backupStatus.EndpointCA = barmanConfiguration.EndpointCA backupStatus.EndpointURL = barmanConfiguration.EndpointURL diff --git a/pkg/management/postgres/backup_test.go b/pkg/management/postgres/backup_test.go index 18307bc791..e521da2620 100644 --- a/pkg/management/postgres/backup_test.go +++ b/pkg/management/postgres/backup_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -22,7 +25,6 @@ import ( "strings" barmanBackup "github.com/cloudnative-pg/barman-cloud/pkg/backup" - barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities" "github.com/cloudnative-pg/machinery/pkg/log" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -117,26 +119,24 @@ var _ = Describe("testing backup command", func() { }, }, } - capabilities, err := barmanCapabilities.CurrentCapabilities() - Expect(err).ShouldNot(HaveOccurred()) backupCommand = BackupCommand{ Cluster: cluster, Backup: backup, Client: fake.NewClientBuilder(). WithScheme(scheme.BuildWithAllKnownScheme()). WithObjects(cluster, backup). + WithStatusSubresource(cluster, backup). Build(), - Recorder: &record.FakeRecorder{}, - Env: os.Environ(), - Log: log.FromContext(context.Background()), - Instance: &Instance{}, - Capabilities: capabilities, + Recorder: &record.FakeRecorder{}, + Env: os.Environ(), + Log: log.FromContext(context.Background()), + Instance: &Instance{}, } }) It("should fail and update cluster and backup resource", func() { backupCommand.run(context.Background()) - Expect(cluster.Status.LastFailedBackup).ToNot(BeEmpty()) + Expect(cluster.Status.LastFailedBackup).ToNot(BeEmpty()) //nolint:staticcheck clusterCond := meta.FindStatusCondition(cluster.Status.Conditions, string(apiv1.ConditionBackup)) Expect(clusterCond.Status).To(Equal(metav1.ConditionFalse)) @@ -149,39 +149,32 @@ var _ = Describe("testing backup command", func() { var _ = Describe("generate backup options", func() { const namespace = "test" - capabilities := barmanCapabilities.Capabilities{ - Version: nil, - HasAzure: true, - HasS3: true, - HasGoogle: true, - HasRetentionPolicy: true, - HasTags: true, - HasCheckWalArchive: true, - HasSnappy: true, - HasErrorCodesForWALRestore: true, - HasAzureManagedIdentity: true, - } - cluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: namespace}, - Spec: apiv1.ClusterSpec{ - Backup: &apiv1.BackupConfiguration{ - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - Data: &apiv1.DataBackupConfiguration{ - Compression: "gzip", - Encryption: "aes256", - ImmediateCheckpoint: true, - Jobs: ptr.To(int32(2)), + + var cluster *apiv1.Cluster + + BeforeEach(func() { + cluster = &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: namespace}, + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ + Data: &apiv1.DataBackupConfiguration{ + Compression: "gzip", + Encryption: "aes256", + ImmediateCheckpoint: true, + Jobs: ptr.To(int32(2)), + }, }, }, }, - }, - } + } + }) It("should generate correct options", func() { extraOptions := []string{"--min-chunk-size=5MB", "--read-timeout=60", "-vv"} cluster.Spec.Backup.BarmanObjectStore.Data.AdditionalCommandArgs = extraOptions - cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, &capabilities) + cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore) options, err := cmd.GetDataConfiguration([]string{}) Expect(err).ToNot(HaveOccurred()) @@ -202,7 +195,7 @@ var _ = Describe("generate backup options", func() { "--encryption=aes256", } cluster.Spec.Backup.BarmanObjectStore.Data.AdditionalCommandArgs = extraOptions - cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, &capabilities) + cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore) 
options, err := cmd.GetDataConfiguration([]string{}) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 209777fc5d..6ee4ec1bbc 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -25,6 +28,7 @@ import ( "sort" "strings" + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" @@ -33,13 +37,16 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres/plugin" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres/replication" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // InstallPgDataFileContent installs a file in PgData, returning true/false if // the file has been changed and an error state -func InstallPgDataFileContent(pgdata, contents, destinationFile string) (bool, error) { +func InstallPgDataFileContent(ctx context.Context, pgdata, contents, destinationFile string) (bool, error) { + contextLogger := log.FromContext(ctx) + targetFile := path.Join(pgdata, destinationFile) result, err := fileutils.WriteStringToFile(targetFile, contents) if err != nil { @@ -47,7 +54,7 @@ func InstallPgDataFileContent(pgdata, contents, destinationFile string) (bool, e } if result { - log.Info( + contextLogger.Info( "Installed configuration file", "pgdata", pgdata, "filename", destinationFile) @@ -60,15 +67,25 @@ func InstallPgDataFileContent(pgdata, contents, destinationFile string) (bool, e // PostgreSQL configuration and rewrites the file in the PGDATA if needed. This // function will return "true" if the configuration has been really changed. 
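RefreshConfigurationFilesFromCluster, whose definition follows, and the other Refresh helpers all build on InstallPgDataFileContent's write-only-if-changed behavior, so callers can tell whether PostgreSQL needs a reload. A standalone sketch of that pattern, using plain os calls instead of the fileutils package:

package main

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
)

// installFileContent writes contents to dir/name only when it differs from
// what is already on disk, reporting whether anything changed.
func installFileContent(dir, name, contents string) (bool, error) {
	target := filepath.Join(dir, name)
	current, err := os.ReadFile(target)
	if err == nil && bytes.Equal(current, []byte(contents)) {
		return false, nil // already up to date: no reload needed
	}
	if err := os.WriteFile(target, []byte(contents), 0o600); err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	dir, err := os.MkdirTemp("", "pgdata")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	changed, _ := installFileContent(dir, "custom.conf", "max_connections = 100\n")
	fmt.Println("first write changed:", changed) // true
	changed, _ = installFileContent(dir, "custom.conf", "max_connections = 100\n")
	fmt.Println("second write changed:", changed) // false
}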
func (instance *Instance) RefreshConfigurationFilesFromCluster( + ctx context.Context, cluster *apiv1.Cluster, preserveUserSettings bool, + operationType postgresClient.OperationType_Type, ) (bool, error) { - postgresConfiguration, sha256, err := createPostgresqlConfiguration(cluster, preserveUserSettings) + pgMajor, err := postgresutils.GetMajorVersionFromPgData(instance.PgData) if err != nil { return false, err } + postgresConfiguration, sha256, err := createPostgresqlConfiguration( + ctx, cluster, preserveUserSettings, pgMajor, + operationType, + ) + if err != nil { + return false, fmt.Errorf("creating postgresql configuration: %w", err) + } postgresConfigurationChanged, err := InstallPgDataFileContent( + ctx, instance.PgData, postgresConfiguration, constants.PostgresqlCustomConfigurationFile) @@ -77,17 +94,14 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster( "installing postgresql configuration: %w", err) } - - if sha256 != "" && postgresConfigurationChanged { - instance.ConfigSha256 = sha256 - } + instance.ConfigSha256 = sha256 return postgresConfigurationChanged, nil } // GeneratePostgresqlHBA generates the pg_hba.conf content with the LDAP configuration if configured. func (instance *Instance) GeneratePostgresqlHBA(cluster *apiv1.Cluster, ldapBindPassword string) (string, error) { - version, err := cluster.GetPostgresqlVersion() + majorVersion, err := cluster.GetPostgresqlMajorVersion() if err != nil { return "", err } @@ -100,7 +114,7 @@ func (instance *Instance) GeneratePostgresqlHBA(cluster *apiv1.Cluster, ldapBind // See: // https://www.postgresql.org/docs/14/release-14.html defaultAuthenticationMethod := "scram-sha-256" - if version < 140000 { + if majorVersion < 14 { defaultAuthenticationMethod = "md5" } @@ -111,7 +125,7 @@ func (instance *Instance) GeneratePostgresqlHBA(cluster *apiv1.Cluster, ldapBind } // RefreshPGHBA generates and writes down the pg_hba.conf file -func (instance *Instance) RefreshPGHBA(cluster *apiv1.Cluster, ldapBindPassword string) ( +func (instance *Instance) RefreshPGHBA(ctx context.Context, cluster *apiv1.Cluster, ldapBindPassword string) ( postgresHBAChanged bool, err error, ) { @@ -121,6 +135,7 @@ func (instance *Instance) RefreshPGHBA(cluster *apiv1.Cluster, ldapBindPassword return false, nil } postgresHBAChanged, err = InstallPgDataFileContent( + ctx, instance.PgData, pgHBAContent, constants.PostgresqlHBARulesFile) @@ -213,13 +228,17 @@ func (instance *Instance) generatePostgresqlIdent(additionalLines []string) (str // RefreshPGIdent generates and writes down the pg_ident.conf file given // a set of additional pg_ident lines that is usually taken from the // Cluster configuration -func (instance *Instance) RefreshPGIdent(additionalLines []string) (postgresIdentChanged bool, err error) { +func (instance *Instance) RefreshPGIdent( + ctx context.Context, + additionalLines []string, +) (postgresIdentChanged bool, err error) { // Generate pg_ident.conf file pgIdentContent, err := instance.generatePostgresqlIdent(additionalLines) if err != nil { return false, nil } postgresIdentChanged, err = InstallPgDataFileContent( + ctx, instance.PgData, pgIdentContent, constants.PostgresqlIdentFile) @@ -240,79 +259,27 @@ func UpdateReplicaConfiguration(pgData, primaryConnInfo, slotName string) (chang return changed, err } - major, err := postgresutils.GetMajorVersion(pgData) - if err != nil { - return false, err - } - - if major < 12 { - return configureRecoveryConfFile(pgData, primaryConnInfo, slotName) - } - return changed, 
createStandbySignal(pgData) } -// configureRecoveryConfFile configures replication in the recovery.conf file -// for PostgreSQL 11 and earlier -func configureRecoveryConfFile(pgData, primaryConnInfo, slotName string) (changed bool, err error) { - targetFile := path.Join(pgData, "recovery.conf") - +// configurePostgresOverrideConfFile writes the content of the override.conf file, including +// replication information. The `primary_slot_name` parameter will be generated only when the slotName parameter is not +// empty. +// Returns a boolean indicating whether any changes were made, and any errors encountered +func configurePostgresOverrideConfFile(pgData, primaryConnInfo, slotName string) (changed bool, err error) { + targetFile := path.Join(pgData, constants.PostgresqlOverrideConfigurationFile) options := map[string]string{ - "standby_mode": "on", "restore_command": fmt.Sprintf( "/controller/manager wal-restore --log-destination %s/%s.json %%f %%p", postgres.LogPath, postgres.LogFileName), "recovery_target_timeline": "latest", + "primary_conninfo": primaryConnInfo, } - if slotName != "" { + if len(slotName) > 0 { options["primary_slot_name"] = slotName } - if primaryConnInfo != "" { - options["primary_conninfo"] = primaryConnInfo - } - - changed, err = configfile.UpdatePostgresConfigurationFile( - targetFile, - options, - "primary_slot_name", - "primary_conninfo", - ) - if err != nil { - return false, err - } - if changed { - log.Info("Updated replication settings", "filename", "recovery.conf") - } - - return changed, nil -} - -// configurePostgresOverrideConfFile writes the content of override.conf file, including -// replication information -func configurePostgresOverrideConfFile(pgData, primaryConnInfo, slotName string) (changed bool, err error) { - targetFile := path.Join(pgData, constants.PostgresqlOverrideConfigurationFile) - - major, err := postgresutils.GetMajorVersion(pgData) - if err != nil { - return false, err - } - - options := make(map[string]string) - - // Write replication control as GUCs (from PostgreSQL 12 or above) - if major >= 12 { - options = map[string]string{ - "restore_command": fmt.Sprintf( - "/controller/manager wal-restore --log-destination %s/%s.json %%f %%p", - postgres.LogPath, postgres.LogFileName), - "recovery_target_timeline": "latest", - "primary_slot_name": slotName, - "primary_conninfo": primaryConnInfo, - } - } - // Ensure that override.conf file contains just the above options changed, err = configfile.WritePostgresConfiguration(targetFile, options) if err != nil { @@ -361,21 +328,29 @@ var cleanupAutoConfOptions = []string{ // migratePostgresAutoConfFile migrates options managed by the operator from `postgresql.auto.conf` file, // to `override.conf` file for an upgrade case. // Returns a boolean indicating whether any changes were made, and any errors encountered -func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (bool, error) { +func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (changed bool, err error) { contextLogger := log.FromContext(ctx).WithName("migratePostgresAutoConfFile") + // This is an idempotent operation that ensures we always include the override import.
+ // See: #5747 + if changed, err = configfile.EnsureIncludes(path.Join(instance.PgData, "postgresql.conf"), + constants.PostgresqlOverrideConfigurationFile); err != nil { + return false, fmt.Errorf("migrating replication settings: %w", + err) + } + overrideConfPath := filepath.Join(instance.PgData, constants.PostgresqlOverrideConfigurationFile) autoConfFile := filepath.Join(instance.PgData, "postgresql.auto.conf") autoConfContent, readLinesErr := fileutils.ReadFileLines(autoConfFile) if readLinesErr != nil { - return false, fmt.Errorf("error while reading postgresql.auto.conf file: %w", readLinesErr) + return changed, fmt.Errorf("error while reading postgresql.auto.conf file: %w", readLinesErr) } overrideConfExists, _ := fileutils.FileExists(overrideConfPath) options := configfile.ReadLinesFromConfigurationContents(autoConfContent, migrateAutoConfOptions...) if len(options) == 0 && overrideConfExists { contextLogger.Trace("no action taken, options slice is empty") - return false, nil + return changed, nil } contextLogger.Info("Start to migrate replication settings", @@ -389,15 +364,7 @@ func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (bool // later during the configuration update. We create it here just as a precaution. if !overrideConfExists { if _, err := fileutils.WriteLinesToFile(overrideConfPath, options); err != nil { - return false, fmt.Errorf("migrating replication settings: %w", - err) - } - - if _, err := configfile.EnsureIncludes( - path.Join(instance.PgData, "postgresql.conf"), - constants.PostgresqlOverrideConfigurationFile, - ); err != nil { - return false, fmt.Errorf("migrating replication settings: %w", + return changed, fmt.Errorf("migrating replication settings: %w", err) } } @@ -420,23 +387,23 @@ func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (bool // createPostgresqlConfiguration creates the PostgreSQL configuration to be // used for this cluster and return it and its sha256 checksum -func createPostgresqlConfiguration(cluster *apiv1.Cluster, preserveUserSettings bool) (string, string, error) { - // Extract the PostgreSQL major version - fromVersion, err := cluster.GetPostgresqlVersion() - if err != nil { - return "", "", err - } - +func createPostgresqlConfiguration( + ctx context.Context, + cluster *apiv1.Cluster, + preserveUserSettings bool, + majorVersion int, + operationType postgresClient.OperationType_Type, +) (string, string, error) { info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - MajorVersion: fromVersion, + MajorVersion: majorVersion, UserSettings: cluster.Spec.PostgresConfiguration.Parameters, IncludingSharedPreloadLibraries: true, AdditionalSharedPreloadLibraries: cluster.Spec.PostgresConfiguration.AdditionalLibraries, IsReplicaCluster: cluster.IsReplica(), IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&cluster.ObjectMeta), IsAlterSystemEnabled: cluster.Spec.PostgresConfiguration.EnableAlterSystem, - SynchronousStandbyNames: replication.GetSynchronousStandbyNames(cluster), + SynchronousStandbyNames: replication.GetSynchronousStandbyNames(ctx, cluster), } if preserveUserSettings { @@ -456,13 +423,48 @@ func createPostgresqlConfiguration(cluster *apiv1.Cluster, preserveUserSettings } sort.Strings(info.TemporaryTablespaces) + // Set additional extensions + for _, extension := range cluster.Spec.PostgresConfiguration.Extensions { + info.AdditionalExtensions = append( + info.AdditionalExtensions, + postgres.AdditionalExtensionConfiguration{ + Name: extension.Name, 
+ ExtensionControlPath: extension.ExtensionControlPath, + DynamicLibraryPath: extension.DynamicLibraryPath, + }, + ) + } + // Setup minimum replay delay if we're on a replica cluster if cluster.IsReplica() && cluster.Spec.ReplicaCluster.MinApplyDelay != nil { info.RecoveryMinApplyDelay = cluster.Spec.ReplicaCluster.MinApplyDelay.Duration } - conf, sha256 := postgres.CreatePostgresqlConfFile(postgres.CreatePostgresqlConfiguration(info)) - return conf, sha256, nil + if isSynchronizeLogicalDecodingEnabled(cluster) { + slots := make([]string, 0, len(cluster.Status.InstanceNames)-1) + for _, instanceName := range cluster.Status.InstanceNames { + if instanceName == cluster.Status.CurrentPrimary { + continue + } + slots = append(slots, cluster.GetSlotNameFromInstanceName(instanceName)) + } + info.SynchronizedStandbySlots = slots + } + + config, err := plugin.CreatePostgresqlConfigurationWithPlugins(ctx, info, operationType) + if err != nil { + return "", "", err + } + + file, sha := postgres.CreatePostgresqlConfFile(config) + return file, sha, nil +} + +func isSynchronizeLogicalDecodingEnabled(cluster *apiv1.Cluster) bool { + return cluster.Spec.ReplicationSlots != nil && + cluster.Spec.ReplicationSlots.HighAvailability != nil && + cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() && + cluster.Spec.ReplicationSlots.HighAvailability.SynchronizeLogicalDecoding } // configurePostgresForImport configures Postgres to be optimized for the first import diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go index 7d040f8071..48cd59847b 100644 --- a/pkg/management/postgres/configuration_test.go +++ b/pkg/management/postgres/configuration_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -21,11 +24,15 @@ import ( "strings" "time" + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" . "github.com/onsi/ginkgo/v2" .
"github.com/onsi/gomega" @@ -84,7 +91,6 @@ var _ = Describe("testing the building of the ldap config string", func() { }) It("correctly builds a bindSearchAuth string", func() { str := buildLDAPConfigString(&cluster, ldapPassword) - fmt.Printf("here %s\n", str) Expect(str).To(Equal(fmt.Sprintf(`host all all 0.0.0.0/0 ldap ldapserver="%s" ldapport=%d `+ `ldapscheme="%s" ldaptls=1 ldapbasedn="%s" ldapbinddn="%s" `+ `ldapbindpasswd="%s" ldapsearchfilter="%s" ldapsearchattribute="%s"`, @@ -117,6 +123,10 @@ var _ = Describe("testing the building of the ldap config string", func() { }) var _ = Describe("Test building of the list of temporary tablespaces", func() { + defaultVersion, defaultVersionErr := version.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(defaultVersionErr).ToNot(HaveOccurred()) + defaultMajor := int(defaultVersion.Major()) + clusterWithoutTablespaces := apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "configurationTest", @@ -166,26 +176,39 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() { }, } - It("doesn't set temp_tablespaces if there are no declared tablespaces", func() { - config, _, err := createPostgresqlConfiguration(&clusterWithoutTablespaces, true) - Expect(err).ShouldNot(HaveOccurred()) + It("doesn't set temp_tablespaces if there are no declared tablespaces", func(ctx SpecContext) { + config, _, err := createPostgresqlConfiguration( + ctx, &clusterWithoutTablespaces, true, defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).ToNot(ContainSubstring("temp_tablespaces")) }) - It("doesn't set temp_tablespaces if there are no temporary tablespaces", func() { - config, _, err := createPostgresqlConfiguration(&clusterWithoutTemporaryTablespaces, true) - Expect(err).ShouldNot(HaveOccurred()) + It("doesn't set temp_tablespaces if there are no temporary tablespaces", func(ctx SpecContext) { + config, _, err := createPostgresqlConfiguration( + ctx, &clusterWithoutTemporaryTablespaces, true, defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).ToNot(ContainSubstring("temp_tablespaces")) }) - It("sets temp_tablespaces when there are temporary tablespaces", func() { - config, _, err := createPostgresqlConfiguration(&clusterWithTemporaryTablespaces, true) - Expect(err).ShouldNot(HaveOccurred()) + It("sets temp_tablespaces when there are temporary tablespaces", func(ctx SpecContext) { + config, _, err := createPostgresqlConfiguration( + ctx, &clusterWithTemporaryTablespaces, true, defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).To(ContainSubstring("temp_tablespaces = 'other_temporary_tablespace,temporary_tablespace'")) }) }) var _ = Describe("recovery_min_apply_delay", func() { + defaultVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(err).ToNot(HaveOccurred()) + defaultMajor := int(defaultVersion.Major()) + primaryCluster := apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "configurationTest", @@ -231,27 +254,36 @@ var _ = Describe("recovery_min_apply_delay", func() { }, } - It("do not set recovery_min_apply_delay in primary clusters", func() { + It("do not set recovery_min_apply_delay in primary clusters", func(ctx SpecContext) { Expect(primaryCluster.IsReplica()).To(BeFalse()) - config, _, err := createPostgresqlConfiguration(&primaryCluster, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _, err := 
createPostgresqlConfiguration( + ctx, &primaryCluster, true, defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay")) }) - It("set recovery_min_apply_delay in replica clusters when set", func() { + It("set recovery_min_apply_delay in replica clusters when set", func(ctx SpecContext) { Expect(replicaCluster.IsReplica()).To(BeTrue()) - config, _, err := createPostgresqlConfiguration(&replicaCluster, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _, err := createPostgresqlConfiguration( + ctx, &replicaCluster, true, defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).To(ContainSubstring("recovery_min_apply_delay = '3600s'")) }) - It("do not set recovery_min_apply_delay in replica clusters when not set", func() { + It("do not set recovery_min_apply_delay in replica clusters when not set", func(ctx SpecContext) { Expect(replicaClusterWithNoDelay.IsReplica()).To(BeTrue()) - config, _, err := createPostgresqlConfiguration(&replicaClusterWithNoDelay, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _, err := createPostgresqlConfiguration( + ctx, &replicaClusterWithNoDelay, true, defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay")) }) }) diff --git a/pkg/management/postgres/conninfo.go b/pkg/management/postgres/conninfo.go index 8b43d42c26..d82e1ce45c 100644 --- a/pkg/management/postgres/conninfo.go +++ b/pkg/management/postgres/conninfo.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/management/postgres/constants/constants.go b/pkg/management/postgres/constants/constants.go index 51d73ac1f7..b84ef3b7e9 100644 --- a/pkg/management/postgres/constants/constants.go +++ b/pkg/management/postgres/constants/constants.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package constants provides the needed constants in the postgres package @@ -51,4 +54,9 @@ const ( // Startup is the name of a file that is created once during the first reconcile of an instance Startup = "cnpg_initialized" + + // CheckEmptyWalArchiveFile is the name of the file in the PGDATA that, + // if present, requires the WAL archiver to check that the backup object + // store is empty. 
+ CheckEmptyWalArchiveFile = ".check-empty-wal-archive" ) diff --git a/pkg/management/postgres/consts.go b/pkg/management/postgres/consts.go index 244c174d1f..e38e157a58 100644 --- a/pkg/management/postgres/consts.go +++ b/pkg/management/postgres/consts.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/management/postgres/ident.go b/pkg/management/postgres/ident.go index 9c50f96c8b..0b40640054 100644 --- a/pkg/management/postgres/ident.go +++ b/pkg/management/postgres/ident.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index f7d44e9d73..5951e2c50f 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package postgres contains the function about starting up, @@ -30,14 +33,17 @@ import ( "sort" "time" + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/jackc/pgx/v5" ctrl "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external" @@ -47,12 +53,14 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/system" ) -const ( - // CheckEmptyWalArchiveFile is the name of the file in the PGDATA that, - // if present, requires the WAL archiver to check that the backup object - // store is empty. 
- CheckEmptyWalArchiveFile = ".check-empty-wal-archive" -) +type connectionProvider interface { + // GetSuperUserDB returns the superuser database connection + GetSuperUserDB() (*sql.DB, error) + // GetTemplateDB returns the template database connection + GetTemplateDB() (*sql.DB, error) + // ConnectionPool returns the connection pool for this instance + ConnectionPool() pool.Pooler +} // InitInfo contains all the info needed to bootstrap a new PostgreSQL instance type InitInfo struct { @@ -119,11 +127,11 @@ type InitInfo struct { TablespaceMapFile []byte } -// CheckTargetDataDirectory ensures that the target data directory does not exist. -// This is a safety check we do before initializing a new instance data directory. +// EnsureTargetDirectoriesDoNotExist ensures that the target data and WAL directories do not exist. +// This is a safety check we do before initializing a new instance. // // If the PGDATA directory already exists and contains a valid PostgreSQL control file, -// the function moves its contents to a uniquely named directory. +// the function moves the contents to uniquely named directories. // If no valid control file is found, the function assumes the directory is the result of // a failed initialization attempt and removes it. // @@ -139,47 +147,84 @@ type InitInfo struct { // important user data. This is particularly relevant when using static provisioning // of PersistentVolumeClaims (PVCs), as it prevents accidental overwriting of a valid // data directory that may exist in the PersistentVolumes (PVs). -func (info InitInfo) CheckTargetDataDirectory(ctx context.Context) error { +func (info InitInfo) EnsureTargetDirectoriesDoNotExist(ctx context.Context) error { contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData) pgDataExists, err := fileutils.FileExists(info.PgData) if err != nil { - log.Error(err, "Error while checking for an existing PGData") - return fmt.Errorf("while verifying is PGDATA exists: %w", err) + contextLogger.Error(err, "Error while checking for an existing data directory") + return fmt.Errorf("while verifying if the data directory exists: %w", err) + } + + pgWalExists := false + if info.PgWal != "" { + if pgWalExists, err = fileutils.FileExists(info.PgWal); err != nil { + contextLogger.Error(err, "Error while checking for an existing WAL directory") + return fmt.Errorf("while verifying if the WAL directory exists: %w", err) + } } - if !pgDataExists { - // The PGDATA directory doesn't exist. We can definitely - // write to it + + if !pgDataExists && !pgWalExists { return nil } - // We've an existing directory. Let's check if this is a real - // PGDATA directory or not. 
- out, err := info.GetInstance().GetPgControldata()
- if err != nil {
- contextLogger.Info("pg_controldata check on existing directory failed, cleaning it up",
- "out", out, "err", err)
+ out, err := info.GetInstance(nil).GetPgControldata()
+ if err == nil {
+ contextLogger.Info("pg_controldata check on existing directory succeeded, renaming the folders", "out", out)
+ return info.renameExistingTargetDataDirectories(ctx, pgWalExists)
+ }
+ contextLogger.Info("pg_controldata check on existing directory failed, cleaning up folders", "err", err, "out", out)
+ return info.removeExistingTargetDataDirectories(ctx, pgDataExists, pgWalExists)
+}
+
+func (info InitInfo) removeExistingTargetDataDirectories(ctx context.Context, pgDataExists, pgWalExists bool) error {
+ contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData, "pgwal", info.PgWal)
+
+ if pgDataExists {
+ contextLogger.Info("cleaning up existing data directory")
 if err := fileutils.RemoveDirectory(info.PgData); err != nil {
 contextLogger.Error(err, "error while cleaning up existing data directory")
 return err
 }
+ }
- return nil
+ if pgWalExists {
+ contextLogger.Info("cleaning up existing WAL directory")
+ if err := fileutils.RemoveDirectory(info.PgWal); err != nil {
+ contextLogger.Error(err, "error while cleaning up existing WAL directory")
+ return err
+ }
 }
- renamedDirectoryName := fmt.Sprintf("%s_%s", info.PgData, fileutils.FormatFriendlyTimestamp(time.Now()))
- contextLogger = contextLogger.WithValues(
- "out", out,
- "newName", renamedDirectoryName,
- )
+ return nil
+}
+
+func (info InitInfo) renameExistingTargetDataDirectories(ctx context.Context, pgWalExists bool) error {
+ contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData, "pgwal", info.PgWal)
- contextLogger.Info("pg_controldata check on existing directory succeeded, renaming the folder")
- if err := os.Rename(info.PgData, renamedDirectoryName); err != nil {
- contextLogger.Error(err, "error while renaming existing data directory")
+ suffixTimestamp := fileutils.FormatFriendlyTimestamp(time.Now())
+
+ pgdataNewName := fmt.Sprintf("%s_%s", info.PgData, suffixTimestamp)
+
+ contextLogger.Info("renaming the data directory", "pgdataNewName", pgdataNewName)
+ if err := os.Rename(info.PgData, pgdataNewName); err != nil {
+ contextLogger.Error(err, "error while renaming existing data directory",
+ "pgdataNewName", pgdataNewName)
 return fmt.Errorf("while renaming existing data directory: %w", err)
 }
+ if pgWalExists {
+ pgwalNewName := fmt.Sprintf("%s_%s", info.PgWal, suffixTimestamp)
+
+ contextLogger.Info("renaming the WAL directory", "pgwalNewName", pgwalNewName)
+ if err := os.Rename(info.PgWal, pgwalNewName); err != nil {
+ contextLogger.Error(err, "error while renaming existing WAL directory")
+ return fmt.Errorf("while renaming existing WAL directory: %w", err)
+ }
+ }
+
 return nil
 }
@@ -250,16 +295,17 @@ func (info InitInfo) CreateDataDirectory() error {
 }
 // GetInstance gets the PostgreSQL instance which correspond to these init information
-func (info InitInfo) GetInstance() *Instance {
+func (info InitInfo) GetInstance(cluster *apiv1.Cluster) *Instance {
 postgresInstance := NewInstance()
 postgresInstance.PgData = info.PgData
 postgresInstance.StartupOptions = []string{"listen_addresses='127.0.0.1'"}
+ postgresInstance.Cluster = cluster
 return postgresInstance
 }
 // ConfigureNewInstance creates the expected users and databases in a new
 // PostgreSQL instance. If any error occurs, we return it
-func (info InitInfo) ConfigureNewInstance(instance *Instance) error {
+func (info InitInfo) ConfigureNewInstance(instance connectionProvider) error {
 log.Info("Configuring new PostgreSQL instance")
 dbSuperUser, err := instance.GetSuperUserDB()
@@ -267,21 +313,21 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error {
 return fmt.Errorf("while getting superuser database: %w", err)
 }
- var existsRole bool
- userRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1",
- info.ApplicationUser)
- err = userRow.Scan(&existsRole)
- if err != nil {
- return err
- }
-
- if !existsRole {
- _, err = dbSuperUser.Exec(fmt.Sprintf(
- "CREATE ROLE %v LOGIN",
- pgx.Identifier{info.ApplicationUser}.Sanitize()))
- if err != nil {
+ if info.ApplicationUser != "" {
+ var existsRole bool
+ userRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1",
+ info.ApplicationUser)
+ if err = userRow.Scan(&existsRole); err != nil {
 return err
 }
+
+ if !existsRole {
+ if _, err = dbSuperUser.Exec(fmt.Sprintf(
+ "CREATE ROLE %v LOGIN",
+ pgx.Identifier{info.ApplicationUser}.Sanitize())); err != nil {
+ return err
+ }
+ }
 }
 // Execute the custom set of init queries for the `postgres` database
@@ -305,14 +351,22 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error {
 if err = info.executeSQLRefs(dbTemplate, info.PostInitTemplateSQLRefsFolder); err != nil {
 return fmt.Errorf("could not execute post init application SQL refs: %w", err)
 }
- if info.ApplicationDatabase == "" {
+
+ filePath := filepath.Join(info.PgData, constants.CheckEmptyWalArchiveFile)
+ // We create the check-empty-WAL-archive file to signal that we should check
+ // whether the destination path is empty
+ if err := fileutils.CreateEmptyFile(filepath.Clean(filePath)); err != nil {
+ return fmt.Errorf("could not create %v file: %w", filePath, err)
+ }
+
+ if info.ApplicationUser == "" || info.ApplicationDatabase == "" {
 return nil
 }
 var existsDB bool
- dbRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_database WHERE datname = $1", info.ApplicationDatabase)
- err = dbRow.Scan(&existsDB)
- if err != nil {
+ dbRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_database WHERE datname = $1",
+ info.ApplicationDatabase)
+ if err = dbRow.Scan(&existsDB); err != nil {
 return err
 }
@@ -339,13 +393,6 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error {
 return fmt.Errorf("could not execute post init application SQL refs: %w", err)
 }
- filePath := filepath.Join(info.PgData, CheckEmptyWalArchiveFile)
- // We create the check empty wal archive file to tell that we should check if the
- // destination path it is empty
- if err := fileutils.CreateEmptyFile(filepath.Clean(filePath)); err != nil {
- return fmt.Errorf("could not create %v file: %w", filePath, err)
- }
-
 return nil
 }
@@ -412,6 +459,15 @@ func (info InitInfo) Bootstrap(ctx context.Context) error {
 return err
 }
+ enabledPluginNamesSet := stringset.From(cluster.GetJobEnabledPluginNames())
+ pluginCli, err := pluginClient.NewClient(ctx, enabledPluginNamesSet)
+ if err != nil {
+ return fmt.Errorf("error while creating the plugin client: %w", err)
+ }
+ defer pluginCli.Close(ctx)
+ ctx = pluginClient.SetPluginClientInContext(ctx, pluginCli)
+ ctx = cluster.SetInContext(ctx)
+
 coredumpFilter := cluster.GetCoredumpFilter()
 if err := system.SetCoredumpFilter(coredumpFilter); err != nil {
 return err
@@ -422,14 +478,19 @@ func (info
InitInfo) Bootstrap(ctx context.Context) error { return err } - instance := info.GetInstance() + instance := info.GetInstance(cluster) // Detect an initdb bootstrap with import isImportBootstrap := cluster.Spec.Bootstrap != nil && cluster.Spec.Bootstrap.InitDB != nil && cluster.Spec.Bootstrap.InitDB.Import != nil - if applied, err := instance.RefreshConfigurationFilesFromCluster(cluster, true); err != nil { + if applied, err := instance.RefreshConfigurationFilesFromCluster( + ctx, + cluster, + true, + postgres.OperationType_TYPE_INIT, + ); err != nil { return fmt.Errorf("while writing the config: %w", err) } else if !applied { return fmt.Errorf("could not apply the config") @@ -437,7 +498,6 @@ func (info InitInfo) Bootstrap(ctx context.Context) error { // Prepare the managed configuration file (override.conf) primaryConnInfo := info.GetPrimaryConnInfo() - slotName := cluster.GetSlotNameFromInstanceName(info.PodName) if isImportBootstrap { // Write a special configuration for the import phase @@ -446,7 +506,7 @@ func (info InitInfo) Bootstrap(ctx context.Context) error { } } else { // Write standard replication configuration - if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, slotName); err != nil { + if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, ""); err != nil { return fmt.Errorf("while configuring Postgres for replication: %w", err) } } @@ -472,8 +532,8 @@ func (info InitInfo) Bootstrap(ctx context.Context) error { // In case of import bootstrap, we restore the standard configuration file content if isImportBootstrap { - /// Write standard replication configuration - if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, slotName); err != nil { + // Write standard replication configuration + if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, ""); err != nil { return fmt.Errorf("while configuring Postgres for replication: %w", err) } diff --git a/pkg/management/postgres/initdb_test.go b/pkg/management/postgres/initdb_test.go new file mode 100644 index 0000000000..bc81cfd3c0 --- /dev/null +++ b/pkg/management/postgres/initdb_test.go @@ -0,0 +1,201 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package postgres + +import ( + "os" + "path" + "path/filepath" + "regexp" + + "github.com/DATA-DOG/go-sqlmock" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("EnsureTargetDirectoriesDoNotExist", func() { + var initInfo InitInfo + + BeforeEach(func() { + initInfo = InitInfo{ + PgData: GinkgoT().TempDir(), + PgWal: GinkgoT().TempDir(), + } + Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().NotTo(HaveOccurred()) + Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed()) + }) + + It("should do nothing if both data and WAL directories do not exist", func(ctx SpecContext) { + Expect(os.RemoveAll(initInfo.PgData)).Should(Succeed()) + Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed()) + + err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + }) + + It("should remove existing directories if pg_controldata check fails", func(ctx SpecContext) { + err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + }) + + It("should remove data directory even if WAL directory is not present", func(ctx SpecContext) { + Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed()) + + err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + }) + + It("should remove WAL directory even if data directory is not present", func(ctx SpecContext) { + Expect(os.RemoveAll(initInfo.PgData)).Should(Succeed()) + + err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + }) +}) + +var _ = Describe("renameExistingTargetDataDirectories", func() { + var initInfo InitInfo + + BeforeEach(func() { + initInfo = InitInfo{ + PgData: GinkgoT().TempDir(), + PgWal: GinkgoT().TempDir(), + } + Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().NotTo(HaveOccurred()) + Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed()) + }) + + It("should rename existing data and WAL directories", func(ctx SpecContext) { + err := initInfo.renameExistingTargetDataDirectories(ctx, true) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + + filelist, err := filepath.Glob(initInfo.PgData + "_*") + Expect(err).ToNot(HaveOccurred()) + Expect(filelist).To(HaveLen(1)) + + filelist, err = filepath.Glob(initInfo.PgWal + "_*") + Expect(err).ToNot(HaveOccurred()) + Expect(filelist).To(HaveLen(1)) + }) + + It("should rename existing data without WAL directories", func(ctx SpecContext) { + Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed()) + + err := initInfo.renameExistingTargetDataDirectories(ctx, false) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + + filelist, err := filepath.Glob(initInfo.PgData + "_*") + Expect(err).ToNot(HaveOccurred()) + 
Expect(filelist).To(HaveLen(1))
+
+ filelist, err = filepath.Glob(initInfo.PgWal + "_*")
+ Expect(err).ToNot(HaveOccurred())
+ Expect(filelist).To(BeEmpty())
+ })
+})
+
+var _ = Describe("ConfigureNewInstance role creation", func() {
+ var (
+ info InitInfo
+ mi *mockInstance
+ mockSuperUser sqlmock.Sqlmock
+ testDir string
+ )
+
+ BeforeEach(func() {
+ var err error
+
+ testDir = path.Join(GinkgoT().TempDir(), "initdb_test")
+
+ Expect(os.MkdirAll(testDir, 0o700)).To(Succeed())
+
+ mi = &mockInstance{}
+ mi.superUserDB, mockSuperUser, err = sqlmock.New()
+ Expect(err).NotTo(HaveOccurred())
+
+ mi.appDB, _, err = sqlmock.New()
+ Expect(err).NotTo(HaveOccurred())
+
+ info = InitInfo{
+ ApplicationUser: "app_user",
+ PostInitSQL: []string{"CREATE ROLE post_init_role LOGIN"},
+ PgData: testDir,
+ }
+ })
+
+ AfterEach(func() {
+ Expect(mockSuperUser.ExpectationsWereMet()).NotTo(HaveOccurred())
+ })
+
+ It("ensures that we create the application user before PostInitSQL", func() {
+ // Expect check if application role exists
+ mockSuperUser.ExpectQuery(regexp.QuoteMeta("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1")).
+ WithArgs("app_user").
+ WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(false))
+
+ mockSuperUser.ExpectExec(`CREATE ROLE \"app_user\" LOGIN`).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ mockSuperUser.ExpectExec("CREATE ROLE post_init_role LOGIN").
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := info.ConfigureNewInstance(mi)
+
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("ensures that we do not create the application user if it already exists", func() {
+ // Expect check if application role exists - return true this time
+ mockSuperUser.ExpectQuery(regexp.QuoteMeta("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1")).
+ WithArgs("app_user").
+ WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true))
+
+ // No direct role creation expected
+
+ mockSuperUser.ExpectExec("CREATE ROLE post_init_role LOGIN").
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ // Execute function under test
+ err := info.ConfigureNewInstance(mi)
+
+ // Verify results
+ Expect(err).NotTo(HaveOccurred())
+ Expect(mockSuperUser.ExpectationsWereMet()).NotTo(HaveOccurred())
+ })
+})
diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go
index 035d2ad736..74f0caee2c 100644
--- a/pkg/management/postgres/instance.go
+++ b/pkg/management/postgres/instance.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
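Note: the mockInstance test double used by the "ConfigureNewInstance role creation" specs above is defined elsewhere in this package's test files and is not part of this diff. A minimal sketch of a double satisfying the new connectionProvider interface could look like the following (the pool import path is an assumption based on the package layout):

```go
package postgres

import (
	"database/sql"

	// Assumed import path for the Pooler interface named by connectionProvider.
	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool"
)

// mockInstance is a hypothetical reconstruction of the test double: it wires
// sqlmock-backed handles into the connectionProvider interface.
type mockInstance struct {
	superUserDB *sql.DB
	appDB       *sql.DB
}

func (m *mockInstance) GetSuperUserDB() (*sql.DB, error) { return m.superUserDB, nil }

func (m *mockInstance) GetTemplateDB() (*sql.DB, error) { return m.appDB, nil }

// ConnectionPool is not exercised by ConfigureNewInstance in the hunks shown
// above, so returning nil keeps the double minimal.
func (m *mockInstance) ConnectionPool() pool.Pooler { return nil }
```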
+ +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -20,6 +23,7 @@ import ( "context" "crypto/tls" "database/sql" + "encoding/json" "errors" "fmt" "io/fs" @@ -28,10 +32,14 @@ import ( "os/exec" "path" "path/filepath" + "runtime" "strconv" + "strings" + "sync" "time" "github.com/blang/semver" + "github.com/cloudnative-pg/machinery/pkg/envmap" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" @@ -53,7 +61,6 @@ import ( ) const ( - postgresName = "postgres" pgCtlName = "pg_ctl" pgRewindName = "pg_rewind" pgBaseBackupName = "pg_basebackup" @@ -67,6 +74,15 @@ const ( pgPingNoAttempt = 3 // connection not attempted (bad params) ) +// GetPostgresExecutableName returns the name of the PostgreSQL executable +func GetPostgresExecutableName() string { + if name := os.Getenv("POSTGRES_NAME"); name != "" { + return name + } + + return "postgres" +} + // shutdownMode represent a way to request the postmaster shutdown type shutdownMode string @@ -141,13 +157,13 @@ type Instance struct { primaryPool *pool.ConnectionPool // The namespace of the k8s object representing this cluster - Namespace string + namespace string // The name of the Pod where the controller is executing - PodName string + podName string - // The name of the cluster of which this Pod is belonging - ClusterName string + // The name of the cluster this instance belongs in + clusterName string // The sha256 of the config. It is computed on the config string, before // adding the PostgreSQL CNPGConfigSha256 parameter @@ -208,10 +224,35 @@ type Instance struct { // MetricsPortTLS enables TLS on the port used to publish metrics over HTTP/HTTPS MetricsPortTLS bool + serverCertificateHandler serverCertificateHandler + + // Cluster is the cluster this instance belongs to + Cluster *apiv1.Cluster +} + +type serverCertificateHandler struct { + operationInProgress sync.Mutex + // ServerCertificate is the certificate we use to serve https connections ServerCertificate *tls.Certificate } +// GetServerCertificate returns the server certificate for the instance +func (instance *Instance) GetServerCertificate() *tls.Certificate { + instance.serverCertificateHandler.operationInProgress.Lock() + defer instance.serverCertificateHandler.operationInProgress.Unlock() + + return instance.serverCertificateHandler.ServerCertificate +} + +// SetServerCertificate sets the server certificate for the instance +func (instance *Instance) SetServerCertificate(cert *tls.Certificate) { + instance.serverCertificateHandler.operationInProgress.Lock() + defer instance.serverCertificateHandler.operationInProgress.Unlock() + + instance.serverCertificateHandler.ServerCertificate = cert +} + // SetPostgreSQLAutoConfWritable allows or deny writes to the // `postgresql.auto.conf` file in PGDATA func (instance *Instance) SetPostgreSQLAutoConfWritable(writeable bool) error { @@ -224,6 +265,11 @@ func (instance *Instance) SetPostgreSQLAutoConfWritable(writeable bool) error { return os.Chmod(autoConfFileName, mode) } +// IsReady runs PgIsReady +func (instance *Instance) IsReady() error { + return PgIsReady() +} + // IsFenced checks whether the instance is marked as fenced func (instance *Instance) IsFenced() bool { return instance.fenced.Load() @@ -261,16 +307,9 @@ func (instance *Instance) CheckHasDiskSpaceForWAL(ctx context.Context) (bool, er } pgControlData := utils.ParsePgControldataOutput(pgControlDataString) - walSegmentSizeString, ok := 
pgControlData["Bytes per WAL segment"] - if !ok { - return false, fmt.Errorf("no 'Bytes per WAL segment' section into pg_controldata output") - } - - walSegmentSize, err := strconv.Atoi(walSegmentSizeString) + walSegmentSize, err := pgControlData.GetBytesPerWALSegment() if err != nil { - return false, fmt.Errorf( - "wrong 'Bytes per WAL segment' pg_controldata value (not an integer): '%s' %w", - walSegmentSizeString, err) + return false, err } walDirectory := path.Join(instance.PgData, pgWalDirectory) @@ -330,7 +369,7 @@ func (instance *Instance) VerifyPgDataCoherence(ctx context.Context) error { } // creates a bare pg_ident.conf that only grants local access - _, err := instance.RefreshPGIdent(nil) + _, err := instance.RefreshPGIdent(ctx, nil) return err } @@ -366,6 +405,24 @@ func NewInstance() *Instance { } } +// WithNamespace specifies the namespace for this Instance +func (instance *Instance) WithNamespace(namespace string) *Instance { + instance.namespace = namespace + return instance +} + +// WithPodName specifies the pod name for this Instance +func (instance *Instance) WithPodName(podName string) *Instance { + instance.podName = podName + return instance +} + +// WithClusterName specifies the name of the cluster this Instance belongs to +func (instance *Instance) WithClusterName(clusterName string) *Instance { + instance.clusterName = clusterName + return instance +} + // RetryUntilServerAvailable is the default retry configuration that is used // to wait for a successful connection to a certain server var RetryUntilServerAvailable = wait.Backoff{ @@ -435,7 +492,7 @@ func (instance *Instance) Startup() error { } pgCtlCmd := exec.Command(pgCtlName, options...) // #nosec - pgCtlCmd.Env = instance.Env + pgCtlCmd.Env = instance.buildPostgresEnv() err := execlog.RunStreaming(pgCtlCmd, pgCtlName) if err != nil { return fmt.Errorf("error starting PostgreSQL instance: %w", err) @@ -458,7 +515,8 @@ func (instance *Instance) ShutdownConnections() { // with Startup. // This function will return an error whether PostgreSQL is still up // after the shutdown request. -func (instance *Instance) Shutdown(options shutdownOptions) error { +func (instance *Instance) Shutdown(ctx context.Context, options shutdownOptions) error { + contextLogger := log.FromContext(ctx) instance.ShutdownConnections() // check instance status @@ -484,10 +542,11 @@ func (instance *Instance) Shutdown(options shutdownOptions) error { pgCtlOptions = append(pgCtlOptions, "-t", fmt.Sprintf("%v", *options.Timeout)) } - log.Info("Shutting down instance", + contextLogger.Info("Shutting down instance", "pgdata", instance.PgData, "mode", options.Mode, "timeout", options.Timeout, + "pgCtlOptions", pgCtlOptions, ) pgCtlCmd := exec.Command(pgCtlName, pgCtlOptions...) 
// #nosec @@ -518,11 +577,14 @@ func (instance *Instance) TryShuttingDownSmartFast(ctx context.Context) error { if smartTimeout > 0 { contextLogger.Info("Requesting smart shutdown of the PostgreSQL instance") - err = instance.Shutdown(shutdownOptions{ - Mode: shutdownModeSmart, - Wait: true, - Timeout: &smartTimeout, - }) + err = instance.Shutdown( + ctx, + shutdownOptions{ + Mode: shutdownModeSmart, + Wait: true, + Timeout: &smartTimeout, + }, + ) if err != nil { contextLogger.Warning("Error while handling the smart shutdown request", "err", err) } @@ -530,10 +592,12 @@ func (instance *Instance) TryShuttingDownSmartFast(ctx context.Context) error { if err != nil || smartTimeout == 0 { contextLogger.Info("Requesting fast shutdown of the PostgreSQL instance") - err = instance.Shutdown(shutdownOptions{ - Mode: shutdownModeFast, - Wait: true, - }) + err = instance.Shutdown(ctx, + shutdownOptions{ + Mode: shutdownModeFast, + Wait: true, + }, + ) } if err != nil { contextLogger.Error(err, "Error while shutting down the PostgreSQL instance") @@ -552,19 +616,24 @@ func (instance *Instance) TryShuttingDownFastImmediate(ctx context.Context) erro contextLogger := log.FromContext(ctx) contextLogger.Info("Requesting fast shutdown of the PostgreSQL instance") - err := instance.Shutdown(shutdownOptions{ - Mode: shutdownModeFast, - Wait: true, - Timeout: &instance.MaxSwitchoverDelay, - }) + err := instance.Shutdown( + ctx, + shutdownOptions{ + Mode: shutdownModeFast, + Wait: true, + Timeout: &instance.MaxSwitchoverDelay, + }, + ) var exitError *exec.ExitError if errors.As(err, &exitError) { contextLogger.Info("Graceful shutdown failed. Issuing immediate shutdown", "exitCode", exitError.ExitCode()) - err = instance.Shutdown(shutdownOptions{ - Mode: shutdownModeImmediate, - Wait: true, - }) + err = instance.Shutdown(ctx, + shutdownOptions{ + Mode: shutdownModeImmediate, + Wait: true, + }, + ) } return err } @@ -592,8 +661,10 @@ func (instance *Instance) Reload(ctx context.Context) error { "reload", } - contextLogger.Info("Requesting configuration reload", - "pgdata", instance.PgData) + contextLogger.Info( + "Requesting configuration reload", + "pgdata", instance.PgData, + "pgCtlOptions", options) // Need to reload certificates if they changed if instance.primaryPool != nil { @@ -612,7 +683,7 @@ func (instance *Instance) Reload(ctx context.Context) error { // Run this instance returning an OS process needed // to control the instance execution func (instance *Instance) Run() (*execlog.StreamingCmd, error) { - process, err := instance.CheckForExistingPostmaster(postgresName) + process, err := instance.CheckForExistingPostmaster(GetPostgresExecutableName()) if err != nil { return nil, err } @@ -645,11 +716,11 @@ func (instance *Instance) Run() (*execlog.StreamingCmd, error) { return nil, err } - postgresCmd := exec.Command(postgresName, options...) // #nosec - postgresCmd.Env = instance.Env + postgresCmd := exec.Command(GetPostgresExecutableName(), options...) 
// #nosec
+ postgresCmd.Env = instance.buildPostgresEnv()
 compatibility.AddInstanceRunCommands(postgresCmd)
- streamingCmd, err := execlog.RunStreamingNoWait(postgresCmd, postgresName)
+ streamingCmd, err := execlog.RunStreamingNoWait(postgresCmd, GetPostgresExecutableName())
 if err != nil {
 return nil, err
 }
@@ -657,6 +728,58 @@ func (instance *Instance) Run() (*execlog.StreamingCmd, error) {
 return streamingCmd, nil
 }
+// buildPostgresEnv builds the environment variables that should be used by PostgreSQL
+// to run the main process, taking care of adding any library path that is needed for
+// extensions.
+func (instance *Instance) buildPostgresEnv() []string {
+ env := instance.Env
+ if env == nil {
+ env = os.Environ()
+ }
+ envMap, _ := envmap.Parse(env)
+ envMap["PG_OOM_ADJUST_FILE"] = "/proc/self/oom_score_adj"
+ envMap["PG_OOM_ADJUST_VALUE"] = "0"
+
+ if instance.Cluster == nil {
+ return envMap.StringSlice()
+ }
+
+ // If there are no additional library paths, we return the environment
+ // unchanged
+ additionalLibraryPaths := collectLibraryPaths(instance.Cluster.Spec.PostgresConfiguration.Extensions)
+ if len(additionalLibraryPaths) == 0 {
+ return envMap.StringSlice()
+ }
+
+ // We add the additional library paths after the entries that are already
+ // available.
+ currentLibraryPath := envMap["LD_LIBRARY_PATH"]
+ if currentLibraryPath != "" {
+ currentLibraryPath += ":"
+ }
+ currentLibraryPath += strings.Join(additionalLibraryPaths, ":")
+ envMap["LD_LIBRARY_PATH"] = currentLibraryPath
+
+ return envMap.StringSlice()
+}
+
+// collectLibraryPaths returns the list of paths that should be added to LD_LIBRARY_PATH
+// for the given extension configurations
+func collectLibraryPaths(extensionList []apiv1.ExtensionConfiguration) []string {
+ result := make([]string, 0, len(extensionList))
+
+ for _, extension := range extensionList {
+ for _, libraryPath := range extension.LdLibraryPath {
+ result = append(
+ result,
+ filepath.Join(postgres.ExtensionsBaseDirectory, extension.Name, libraryPath),
+ )
+ }
+ }
+
+ return result
+}
+
 // WithActiveInstance execute the internal function while this
 // PostgreSQL instance is running
 func (instance *Instance) WithActiveInstance(inner func() error) error {
@@ -681,7 +804,7 @@ func (instance *Instance) WithActiveInstance(inner func() error) error {
 }
 defer func() {
- if err := instance.Shutdown(defaultShutdownOptions); err != nil {
+ if err := instance.Shutdown(ctx, defaultShutdownOptions); err != nil {
 log.Info("Error while deactivating instance", "err", err)
 }
 }()
@@ -720,7 +843,7 @@ func (instance *Instance) GetPgVersion() (semver.Version, error) {
 }
 // ConnectionPool gets or initializes the connection pool for this instance
-func (instance *Instance) ConnectionPool() *pool.ConnectionPool {
+func (instance *Instance) ConnectionPool() pool.Pooler {
 const applicationName = "cnpg-instance-manager"
 if instance.pool == nil {
 socketDir := GetSocketDir()
@@ -776,14 +899,14 @@ func (instance *Instance) Demote(ctx context.Context, cluster *apiv1.Cluster) er
 contextLogger := log.FromContext(ctx)
 contextLogger.Info("Demoting instance", "pgpdata", instance.PgData)
- slotName := cluster.GetSlotNameFromInstanceName(instance.PodName)
+ slotName := cluster.GetSlotNameFromInstanceName(instance.GetPodName())
 _, err := UpdateReplicaConfiguration(instance.PgData, instance.GetPrimaryConnInfo(), slotName)
 return err
 }
 // WaitForPrimaryAvailable waits until we can connect to the primary
 func (instance *Instance) WaitForPrimaryAvailable(ctx context.Context) error
{ - primaryConnInfo := instance.GetPrimaryConnInfo() + " dbname=postgres connect_timeout=5" + primaryConnInfo := instance.GetPrimaryConnInfo() + " connect_timeout=5" log.Info("Waiting for the new primary to be available", "primaryConnInfo", primaryConnInfo) @@ -799,22 +922,6 @@ func (instance *Instance) WaitForPrimaryAvailable(ctx context.Context) error { return waitForConnectionAvailable(ctx, db) } -// CompleteCrashRecovery temporary starts up the server and wait for it -// to be fully available for queries. This will ensure that the crash recovery -// is fully done. -// Important: this function must be called only when the instance isn't started -func (instance *Instance) CompleteCrashRecovery(ctx context.Context) error { - log.Info("Waiting for server to complete crash recovery") - - defer func() { - instance.ShutdownConnections() - }() - - return instance.WithActiveInstance(func() error { - return instance.WaitForSuperuserConnectionAvailable(ctx) - }) -} - // WaitForSuperuserConnectionAvailable waits until we can connect to this // instance using the superuser account func (instance *Instance) WaitForSuperuserConnectionAvailable(ctx context.Context) error { @@ -828,18 +935,19 @@ func (instance *Instance) WaitForSuperuserConnectionAvailable(ctx context.Contex // waitForConnectionAvailable waits until we can connect to the passed // sql.DB connection -func waitForConnectionAvailable(context context.Context, db *sql.DB) error { +func waitForConnectionAvailable(ctx context.Context, db *sql.DB) error { + contextLogger := log.FromContext(ctx) errorIsRetryable := func(err error) bool { - if context.Err() != nil { + if ctx.Err() != nil { return false } return err != nil } return retry.OnError(RetryUntilServerAvailable, errorIsRetryable, func() error { - err := db.PingContext(context) + err := db.PingContext(ctx) if err != nil { - log.Info("DB not available, will retry", "err", err) + contextLogger.Info("DB not available, will retry", "err", err) } return err }) @@ -903,9 +1011,42 @@ func (instance *Instance) WaitForConfigReload(ctx context.Context) (*postgres.Po return status, nil } +// GetSynchronousReplicationMetadata reads the current PostgreSQL configuration +// and extracts the parameters that were used to compute the synchronous_standby_names +// GUC. 
+func (instance *Instance) GetSynchronousReplicationMetadata( + ctx context.Context, +) (*postgres.SynchronousStandbyNamesConfig, error) { + db, err := instance.GetSuperUserDB() + if err != nil { + return nil, err + } + + var metadata string + row := db.QueryRowContext( + ctx, fmt.Sprintf("SHOW %s", postgres.CNPGSynchronousStandbyNamesMetadata)) + err = row.Scan(&metadata) + if err != nil { + return nil, err + } + + if len(metadata) == 0 { + return nil, nil + } + + var result postgres.SynchronousStandbyNamesConfig + if err := json.Unmarshal([]byte(metadata), &result); err != nil { + return nil, fmt.Errorf("while decoding synchronous_standby_names metadata: %w", err) + } + + return &result, nil +} + // waitForStreamingConnectionAvailable waits until we can connect to the passed // sql.DB connection using streaming protocol -func waitForStreamingConnectionAvailable(db *sql.DB) error { +func waitForStreamingConnectionAvailable(ctx context.Context, db *sql.DB) error { + contextLogger := log.FromContext(ctx) + errorIsRetryable := func(err error) bool { return err != nil } @@ -913,7 +1054,7 @@ func waitForStreamingConnectionAvailable(db *sql.DB) error { return retry.OnError(RetryUntilServerAvailable, errorIsRetryable, func() error { result, err := db.Query("IDENTIFY_SYSTEM") if err != nil || result.Err() != nil { - log.Info("DB not available, will retry", "err", err) + contextLogger.Info("DB not available, will retry", "err", err) return err } defer func() { @@ -982,7 +1123,7 @@ func (instance *Instance) removePgControlFileBackup() error { // Rewind uses pg_rewind to align this data directory with the contents of the primary node. // If postgres major version is >= 13, add "--restore-target-wal" option -func (instance *Instance) Rewind(ctx context.Context, postgresMajorVersion int) error { +func (instance *Instance) Rewind(ctx context.Context) error { contextLogger := log.FromContext(ctx) // Signal the liveness probe that we are running pg_rewind before starting postgres @@ -996,16 +1137,17 @@ func (instance *Instance) Rewind(ctx context.Context, postgresMajorVersion int) primaryConnInfo := instance.GetPrimaryConnInfo() options := []string{ "-P", - "--source-server", primaryConnInfo + " dbname=postgres", + "--source-server", primaryConnInfo, "--target-pgdata", instance.PgData, } - // As PostgreSQL 13 introduces support of restore from the WAL archive in pg_rewind, - // let’s automatically use it, if possible - if postgresMajorVersion >= 130000 { - options = append(options, "--restore-target-wal") + // make sure restore_command is set in override.conf + if _, err := configurePostgresOverrideConfFile(instance.PgData, primaryConnInfo, ""); err != nil { + return err } + options = append(options, "--restore-target-wal") + // Make sure PostgreSQL control file is not empty err := instance.managePgControlFileBackup() if err != nil { @@ -1017,7 +1159,7 @@ func (instance *Instance) Rewind(ctx context.Context, postgresMajorVersion int) "options", options) pgRewindCmd := exec.Command(pgRewindName, options...) 
// #nosec - pgRewindCmd.Env = instance.Env + pgRewindCmd.Env = instance.buildPostgresEnv() err = execlog.RunStreaming(pgRewindCmd, pgRewindName) if err != nil { contextLogger.Error(err, "Failed to execute pg_rewind", "options", options) @@ -1112,19 +1254,24 @@ func (instance *Instance) GetInstanceCommandChan() <-chan InstanceCommand { return instance.instanceCommandChan } -// GetClusterName returns the name of the cluster where this instance is running +// GetClusterName returns the name of the cluster where this instance belongs func (instance *Instance) GetClusterName() string { - return instance.ClusterName + return instance.clusterName } // GetPodName returns the name of the pod where this instance is running func (instance *Instance) GetPodName() string { - return instance.PodName + return instance.podName } // GetNamespaceName returns the name of the namespace where this instance is running func (instance *Instance) GetNamespaceName() string { - return instance.Namespace + return instance.namespace +} + +// GetArchitecture returns the runtime architecture +func (instance *Instance) GetArchitecture() string { + return runtime.GOARCH } // RequestFastImmediateShutdown request the lifecycle manager to shut down @@ -1216,7 +1363,7 @@ func (instance *Instance) waitForInstanceRestarted(ctx context.Context, after ti return err } var startTime time.Time - row := db.QueryRowContext(ctx, "SELECT pg_postmaster_start_time()") + row := db.QueryRowContext(ctx, "SELECT pg_catalog.pg_postmaster_start_time()") err = row.Scan(&startTime) if err != nil { return err @@ -1236,8 +1383,8 @@ func (instance *Instance) DropConnections() error { } if _, err := conn.Exec( - `SELECT pg_terminate_backend(pid) - FROM pg_stat_activity + `SELECT pg_catalog.pg_terminate_backend(pid) + FROM pg_catalog.pg_stat_activity WHERE pid <> pg_backend_pid() AND backend_type = 'client backend';`, ); err != nil { @@ -1249,7 +1396,15 @@ func (instance *Instance) DropConnections() error { // GetPrimaryConnInfo returns the DSN to reach the primary func (instance *Instance) GetPrimaryConnInfo() string { - return buildPrimaryConnInfo(instance.ClusterName+"-rw", instance.PodName) + result := buildPrimaryConnInfo(instance.GetClusterName()+"-rw", instance.GetPodName()) + " dbname=postgres" + + standbyTCPUserTimeout := os.Getenv("CNPG_STANDBY_TCP_USER_TIMEOUT") + if len(standbyTCPUserTimeout) > 0 { + result = fmt.Sprintf("%s tcp_user_timeout='%s'", result, + strings.ReplaceAll(strings.ReplaceAll(standbyTCPUserTimeout, `\`, `\\`), `'`, `\'`)) + } + + return result } // HandleInstanceCommandRequests execute a command requested by the reconciliation diff --git a/pkg/management/postgres/instance_replica.go b/pkg/management/postgres/instance_replica.go index 5c681b95f8..c8d3bc6354 100644 --- a/pkg/management/postgres/instance_replica.go +++ b/pkg/management/postgres/instance_replica.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
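Note: the new CNPG_STANDBY_TCP_USER_TIMEOUT handling in GetPrimaryConnInfo above escapes backslashes and single quotes before embedding the value in the DSN. A self-contained sketch of the effect (host and user are illustrative; the real prefix comes from buildPrimaryConnInfo, which is outside this diff):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Value read from CNPG_STANDBY_TCP_USER_TIMEOUT (illustrative).
	raw := "5000"

	// Same escaping as GetPrimaryConnInfo: backslashes first, then quotes.
	escaped := strings.ReplaceAll(strings.ReplaceAll(raw, `\`, `\\`), `'`, `\'`)

	// Illustrative DSN prefix standing in for buildPrimaryConnInfo's output.
	dsn := "host=cluster-example-rw user=streaming_replica dbname=postgres"
	dsn = fmt.Sprintf("%s tcp_user_timeout='%s'", dsn, escaped)

	fmt.Println(dsn)
	// host=cluster-example-rw user=streaming_replica dbname=postgres tcp_user_timeout='5000'
}
```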
+ +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -51,7 +54,7 @@ func (instance *Instance) RefreshReplicaConfiguration( return changed, nil } - if cluster.IsReplica() && cluster.Status.TargetPrimary == instance.PodName { + if cluster.IsReplica() && cluster.Status.TargetPrimary == instance.GetPodName() { result, err := instance.writeReplicaConfigurationForDesignatedPrimary(ctx, cli, cluster) return changed || result, err } @@ -60,8 +63,9 @@ func (instance *Instance) RefreshReplicaConfiguration( } func (instance *Instance) writeReplicaConfigurationForReplica(cluster *apiv1.Cluster) (changed bool, err error) { - slotName := cluster.GetSlotNameFromInstanceName(instance.PodName) - return UpdateReplicaConfiguration(instance.PgData, instance.GetPrimaryConnInfo(), slotName) + slotName := cluster.GetSlotNameFromInstanceName(instance.GetPodName()) + primaryConnInfo := instance.GetPrimaryConnInfo() + return UpdateReplicaConfiguration(instance.PgData, primaryConnInfo, slotName) } func (instance *Instance) writeReplicaConfigurationForDesignatedPrimary( @@ -75,7 +79,7 @@ func (instance *Instance) writeReplicaConfigurationForDesignatedPrimary( } connectionString, err := external.ConfigureConnectionToServer( - ctx, cli, instance.Namespace, &server) + ctx, cli, instance.GetNamespaceName(), &server) if err != nil { return false, err } diff --git a/pkg/management/postgres/instance_test.go b/pkg/management/postgres/instance_test.go index defe47e317..5801cc3c4c 100644 --- a/pkg/management/postgres/instance_test.go +++ b/pkg/management/postgres/instance_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -21,8 +24,11 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/cloudnative-pg/machinery/pkg/fileutils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" @@ -263,3 +269,95 @@ var _ = Describe("ALTER SYSTEM enable and disable in PostgreSQL <17", func() { Expect(info.Mode()).To(BeEquivalentTo(0o400)) }) }) + +var _ = Describe("buildPostgresEnv", func() { + var cluster apiv1.Cluster + var instance Instance + + BeforeEach(func() { + err := os.Unsetenv("LD_LIBRARY_PATH") + Expect(err).ToNot(HaveOccurred()) + + cluster = apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "foo", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "foo:dev", + }, + }, + { + Name: "bar", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "bar:dev", + }, + }, + }, + }, + }, + } + instance.Cluster = &cluster + }) + + Context("Extensions enabled, LD_LIBRARY_PATH undefined", func() { + It("should be empty by default", func() { + ldLibraryPath := getLibraryPathFromEnv(instance.buildPostgresEnv()) + Expect(ldLibraryPath).To(BeEmpty()) + }) + }) + + Context("Extensions enabled, LD_LIBRARY_PATH defined", func() { + const ( + path1 = postgres.ExtensionsBaseDirectory + "/foo/syslib" + path2 = postgres.ExtensionsBaseDirectory + "/foo/sample" + path3 = postgres.ExtensionsBaseDirectory + "/bar/syslib" + path4 = postgres.ExtensionsBaseDirectory + "/bar/sample" + ) + finalPaths := strings.Join([]string{path1, path2, path3, path4}, ":") + + BeforeEach(func() { + cluster.Spec.PostgresConfiguration.Extensions[0].LdLibraryPath = []string{"/syslib", "sample/"} + cluster.Spec.PostgresConfiguration.Extensions[1].LdLibraryPath = []string{"./syslib", "./sample/"} + }) + + It("should be defined", func() { + ldLibraryPath := getLibraryPathFromEnv(instance.buildPostgresEnv()) + Expect(ldLibraryPath).To(Equal(fmt.Sprintf("LD_LIBRARY_PATH=%s", finalPaths))) + }) + It("should retain existing values", func() { + GinkgoT().Setenv("LD_LIBRARY_PATH", "/my/library/path") + + ldLibraryPath := getLibraryPathFromEnv(instance.buildPostgresEnv()) + Expect(ldLibraryPath).To(BeEquivalentTo(fmt.Sprintf("LD_LIBRARY_PATH=/my/library/path:%s", finalPaths))) + }) + }) + + Context("Extensions disabled", func() { + BeforeEach(func() { + cluster.Spec.PostgresConfiguration.Extensions = []apiv1.ExtensionConfiguration{} + }) + It("LD_LIBRARY_PATH should be empty", func() { + ldLibraryPath := getLibraryPathFromEnv(instance.buildPostgresEnv()) + Expect(ldLibraryPath).To(BeEmpty()) + }) + }) +}) + +func getLibraryPathFromEnv(envs []string) string { + var ldLibraryPath string + + for i := len(envs) - 1; i >= 0; i-- { + if strings.HasPrefix(envs[i], "LD_LIBRARY_PATH=") { + ldLibraryPath = envs[i] + break + } + } + + return ldLibraryPath +} diff --git a/pkg/management/postgres/join.go b/pkg/management/postgres/join.go index 419d4ccfd1..d800ceaad7 100644 --- a/pkg/management/postgres/join.go +++ b/pkg/management/postgres/join.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
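Note: the expected paths in the buildPostgresEnv tests above hold because filepath.Join cleans every element, so "/syslib", "sample/", "./syslib", and "./sample/" all collapse to plain segments under the per-extension directory. A runnable illustration (the "/extensions" base is assumed; the real postgres.ExtensionsBaseDirectory constant is defined outside this diff):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Stand-in for postgres.ExtensionsBaseDirectory (assumed value).
	const base = "/extensions"

	for _, p := range []string{"/syslib", "sample/", "./syslib", "./sample/"} {
		// filepath.Join concatenates and then cleans the result, stripping
		// leading slashes, trailing slashes, and "./" prefixes.
		fmt.Println(filepath.Join(base, "foo", p))
	}
	// Output:
	// /extensions/foo/syslib
	// /extensions/foo/sample
	// /extensions/foo/syslib
	// /extensions/foo/sample
}
```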
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres import ( + "context" "fmt" "os/exec" @@ -33,7 +37,7 @@ import ( // ClonePgData clones an existing server, given its connection string, // to a certain data directory -func ClonePgData(connectionString, targetPgData, walDir string) error { +func ClonePgData(ctx context.Context, connectionString, targetPgData, walDir string) error { log.Info("Waiting for server to be available", "connectionString", connectionString) db, err := pool.NewDBConnection(connectionString, pool.ConnectionProfilePostgresqlPhysicalReplication) @@ -44,7 +48,7 @@ func ClonePgData(connectionString, targetPgData, walDir string) error { _ = db.Close() }() - err = waitForStreamingConnectionAvailable(db) + err = waitForStreamingConnectionAvailable(ctx, db) if err != nil { return fmt.Errorf("source server not available: %v", connectionString) } @@ -70,32 +74,24 @@ func ClonePgData(connectionString, targetPgData, walDir string) error { } // Join creates a new instance joined to an existing PostgreSQL cluster -func (info InitInfo) Join(cluster *apiv1.Cluster) error { +func (info InitInfo) Join(ctx context.Context, cluster *apiv1.Cluster) error { primaryConnInfo := buildPrimaryConnInfo(info.ParentNode, info.PodName) + " dbname=postgres connect_timeout=5" - pgVersion, err := cluster.GetPostgresqlVersion() - if err != nil { - log.Warning( - "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", - "imageName", cluster.GetImageName(), - "err", err) - } else if pgVersion >= 120000 { - // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. - // A short timeout could not be enough in case the instance is slow to send data, - // like when the I/O is overloaded. - primaryConnInfo += " options='-c wal_sender_timeout=0s'" - } + // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. + // A short timeout could not be enough in case the instance is slow to send data, + // like when the I/O is overloaded. + primaryConnInfo += " options='-c wal_sender_timeout=0s'" coredumpFilter := cluster.GetCoredumpFilter() if err := system.SetCoredumpFilter(coredumpFilter); err != nil { return err } - if err = ClonePgData(primaryConnInfo, info.PgData, info.PgWal); err != nil { + if err := ClonePgData(ctx, primaryConnInfo, info.PgData, info.PgWal); err != nil { return err } slotName := cluster.GetSlotNameFromInstanceName(info.PodName) - _, err = UpdateReplicaConfiguration(info.PgData, info.GetPrimaryConnInfo(), slotName) + _, err := UpdateReplicaConfiguration(info.PgData, info.GetPrimaryConnInfo(), slotName) return err } diff --git a/pkg/management/postgres/logicalimport/constants.go b/pkg/management/postgres/logicalimport/constants.go index 19c2853538..818237c759 100644 --- a/pkg/management/postgres/logicalimport/constants.go +++ b/pkg/management/postgres/logicalimport/constants.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logicalimport diff --git a/pkg/management/postgres/logicalimport/database.go b/pkg/management/postgres/logicalimport/database.go index 82e87ed089..eb48507582 100644 --- a/pkg/management/postgres/logicalimport/database.go +++ b/pkg/management/postgres/logicalimport/database.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logicalimport @@ -51,7 +54,7 @@ func (ds *databaseSnapshotter) getDatabaseList(ctx context.Context, target pool. if err != nil { return nil, err } - query := `SELECT datname FROM pg_database d WHERE datallowconn + query := `SELECT datname FROM pg_catalog.pg_database d WHERE datallowconn AND NOT datistemplate AND datallowconn AND datname != 'postgres' @@ -88,6 +91,7 @@ func (ds *databaseSnapshotter) exportDatabases( ctx context.Context, target pool.Pooler, databases []string, + extraOptions []string, ) error { contextLogger := log.FromContext(ctx) sectionsToExport := []string{} @@ -100,12 +104,13 @@ func (ds *databaseSnapshotter) exportDatabases( contextLogger.Info("exporting database", "databaseName", database) dsn := target.GetDsn(database) options := []string{ - "-Fc", + "-Fd", "-f", generateFileNameForDatabase(database), "-d", dsn, "-v", } options = append(options, sectionsToExport...) + options = append(options, extraOptions...) contextLogger.Info("Running pg_dump", "cmd", pgDump, "options", options) @@ -123,6 +128,7 @@ func (ds *databaseSnapshotter) importDatabases( ctx context.Context, target pool.Pooler, databases []string, + extraOptions []string, ) error { contextLogger := log.FromContext(ctx) @@ -156,6 +162,7 @@ func (ds *databaseSnapshotter) importDatabases( generateFileNameForDatabase(database), } + options = append(options, extraOptions...) options = append(options, alwaysPresentOptions...) contextLogger.Info("Running pg_restore", @@ -179,6 +186,7 @@ func (ds *databaseSnapshotter) importDatabaseContent( database string, targetDatabase string, owner string, + extraOptions []string, ) error { contextLogger := log.FromContext(ctx) @@ -204,7 +212,9 @@ func (ds *databaseSnapshotter) importDatabaseContent( "section", section, ) - options := []string{ + var options []string + + alwaysPresentOptions := []string{ "-U", "postgres", "--no-owner", "--no-privileges", @@ -214,6 +224,9 @@ func (ds *databaseSnapshotter) importDatabaseContent( generateFileNameForDatabase(database), } + options = append(options, extraOptions...) + options = append(options, alwaysPresentOptions...) 
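```go
// NOTE (illustrative, not part of the patch): pg_dump's directory format
// ("-Fd", replacing "-Fc" in exportDatabases above) is the only dump format
// that supports parallel dumps via "-j", which is presumably what the new
// extraOptions plumbing enables. In importDatabaseContent, the user-supplied
// extraOptions are appended before the fixed alwaysPresentOptions, so the
// operator-controlled connection and ownership flags come last on the
// pg_restore command line.
```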
+ contextLogger.Info("Running pg_restore", "cmd", pgRestore, "options", options) @@ -321,7 +334,7 @@ func (ds *databaseSnapshotter) dropExtensionsFromDatabase( // In Postgres, OID 16384 is the first non system ID that can be used in the database // catalog, as defined in the `FirstNormalObjectId` constant (src/include/access/transam.h) - rows, err := db.QueryContext(ctx, "SELECT extname FROM pg_extension WHERE oid >= 16384") + rows, err := db.QueryContext(ctx, "SELECT extname FROM pg_catalog.pg_extension WHERE oid >= 16384") if err != nil { return err } diff --git a/pkg/management/postgres/logicalimport/database_test.go b/pkg/management/postgres/logicalimport/database_test.go index 426703e5d7..7ab009240a 100644 --- a/pkg/management/postgres/logicalimport/database_test.go +++ b/pkg/management/postgres/logicalimport/database_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logicalimport import ( - "context" "fmt" "github.com/DATA-DOG/go-sqlmock" @@ -32,14 +34,12 @@ import ( var _ = Describe("databaseSnapshotter methods test", func() { var ( - ctx context.Context ds databaseSnapshotter fp fakePooler mock sqlmock.Sqlmock ) BeforeEach(func() { - ctx = context.TODO() ds = databaseSnapshotter{ cluster: &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -109,13 +109,13 @@ var _ = Describe("databaseSnapshotter methods test", func() { } }) - It("should execute the query properly", func() { + It("should execute the query properly", func(ctx SpecContext) { mock.ExpectExec(createQuery).WillReturnResult(sqlmock.NewResult(0, 0)) err := ds.executePostImportQueries(ctx, fp, "test") Expect(err).ToNot(HaveOccurred()) }) - It("should return any error encountered", func() { + It("should return any error encountered", func(ctx SpecContext) { expectedErr := fmt.Errorf("will fail") mock.ExpectExec(createQuery).WillReturnError(expectedErr) err := ds.executePostImportQueries(ctx, fp, "test") @@ -123,7 +123,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { }) }) - It("should run analyze", func() { + It("should run analyze", func(ctx SpecContext) { mock.ExpectExec("ANALYZE VERBOSE").WillReturnResult(sqlmock.NewResult(0, 0)) err := ds.analyze(ctx, fp, []string{"test"}) Expect(err).ToNot(HaveOccurred()) @@ -133,10 +133,10 @@ var _ = Describe("databaseSnapshotter methods test", func() { var expectedQuery *sqlmock.ExpectedQuery BeforeEach(func() { - expectedQuery = mock.ExpectQuery("SELECT extname FROM pg_extension WHERE oid >= 16384") + expectedQuery = mock.ExpectQuery("SELECT extname FROM pg_catalog.pg_extension WHERE oid >= 16384") }) - It("should drop the user-defined extensions successfully", func() { + It("should drop the user-defined extensions successfully", func(ctx SpecContext) { extensions := []string{"extension1", "extension2"} rows := sqlmock.NewRows([]string{"extname"}) @@ -150,7 +150,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should correctly handle an error when querying 
for extensions", func() { + It("should correctly handle an error when querying for extensions", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") expectedQuery.WillReturnError(expectedErr) @@ -158,7 +158,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(err).To(Equal(expectedErr)) }) - It("should correctly handle an error when dropping an extension", func() { + It("should correctly handle an error when dropping an extension", func(ctx SpecContext) { rows := sqlmock.NewRows([]string{"extname"}).AddRow("extension1") expectedQuery.WillReturnRows(rows) @@ -172,7 +172,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { }) Context("getDatabaseList testing", func() { - const query = "SELECT datname FROM pg_database d " + + const query = "SELECT datname FROM pg_catalog.pg_database d " + "WHERE datallowconn AND NOT datistemplate AND datallowconn AND datname != 'postgres' " + "ORDER BY datname" @@ -184,7 +184,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { } }) - It("should return the explicit database list if present", func() { + It("should return the explicit database list if present", func(ctx SpecContext) { explicitDatabaseList := []string{"db1", "db2"} ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = explicitDatabaseList @@ -193,7 +193,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(dbs).To(Equal(explicitDatabaseList)) }) - It("should query for databases if explicit list is not present", func() { + It("should query for databases if explicit list is not present", func(ctx SpecContext) { expectedQuery := mock.ExpectQuery(query) ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = []string{"*"} @@ -209,7 +209,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(dbs).To(Equal(queryDatabaseList)) }) - It("should return any error encountered when querying for databases", func() { + It("should return any error encountered when querying for databases", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") expectedQuery := mock.ExpectQuery(query) ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = []string{"*"} diff --git a/pkg/management/postgres/logicalimport/doc.go b/pkg/management/postgres/logicalimport/doc.go index 9ba516d8be..49c11da4e5 100644 --- a/pkg/management/postgres/logicalimport/doc.go +++ b/pkg/management/postgres/logicalimport/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package logicalimport contains the logic needed to import a logical snapshot diff --git a/pkg/management/postgres/logicalimport/microservice.go b/pkg/management/postgres/logicalimport/microservice.go index e84a28d843..de461ba189 100644 --- a/pkg/management/postgres/logicalimport/microservice.go +++ b/pkg/management/postgres/logicalimport/microservice.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logicalimport @@ -34,18 +37,29 @@ func Microservice( ) error { contextLogger := log.FromContext(ctx) ds := databaseSnapshotter{cluster: cluster} - databases := cluster.Spec.Bootstrap.InitDB.Import.Databases + initDB := cluster.Spec.Bootstrap.InitDB + databases := initDB.Import.Databases + contextLogger.Info("starting microservice clone process") if err := createDumpsDirectory(); err != nil { return err } - if err := ds.exportDatabases(ctx, origin, databases); err != nil { + if err := ds.exportDatabases( + ctx, + origin, + databases, + initDB.Import.PgDumpExtraOptions, + ); err != nil { return err } - if err := ds.dropExtensionsFromDatabase(ctx, destination, cluster.Spec.Bootstrap.InitDB.Database); err != nil { + if err := ds.dropExtensionsFromDatabase( + ctx, + destination, + initDB.Database, + ); err != nil { return err } @@ -53,8 +67,9 @@ func Microservice( ctx, destination, databases[0], - cluster.Spec.Bootstrap.InitDB.Database, - cluster.Spec.Bootstrap.InitDB.Owner, + initDB.Database, + initDB.Owner, + initDB.Import.PgRestoreExtraOptions, ); err != nil { return err } @@ -63,9 +78,13 @@ func Microservice( return err } - if err := ds.executePostImportQueries(ctx, destination, cluster.Spec.Bootstrap.InitDB.Database); err != nil { + if err := ds.executePostImportQueries( + ctx, + destination, + initDB.Database, + ); err != nil { return err } - return ds.analyze(ctx, destination, []string{cluster.Spec.Bootstrap.InitDB.Database}) + return ds.analyze(ctx, destination, []string{initDB.Database}) } diff --git a/pkg/management/postgres/logicalimport/monolith.go b/pkg/management/postgres/logicalimport/monolith.go index f65b0260c7..017ef6e68d 100644 --- a/pkg/management/postgres/logicalimport/monolith.go +++ b/pkg/management/postgres/logicalimport/monolith.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package logicalimport @@ -35,12 +38,13 @@ func Monolith( contextLogger := log.FromContext(ctx) contextLogger.Info("starting monolith clone process") - if err := cloneRoles(ctx, cluster, destination, origin); err != nil { - return err - } - - if err := cloneRoleInheritance(ctx, destination, origin); err != nil { - return err + if len(cluster.Spec.Bootstrap.InitDB.Import.Roles) > 0 { + if err := cloneRoles(ctx, cluster, destination, origin); err != nil { + return err + } + if err := cloneRoleInheritance(ctx, destination, origin); err != nil { + return err + } } ds := databaseSnapshotter{cluster: cluster} @@ -53,11 +57,21 @@ func Monolith( return err } - if err := ds.exportDatabases(ctx, origin, databases); err != nil { + if err := ds.exportDatabases( + ctx, + origin, + databases, + cluster.Spec.Bootstrap.InitDB.Import.PgDumpExtraOptions, + ); err != nil { return err } - if err := ds.importDatabases(ctx, destination, databases); err != nil { + if err := ds.importDatabases( + ctx, + destination, + databases, + cluster.Spec.Bootstrap.InitDB.Import.PgRestoreExtraOptions, + ); err != nil { return err } diff --git a/pkg/management/postgres/logicalimport/role.go b/pkg/management/postgres/logicalimport/role.go index d3eae34624..e7c4f5ee8f 100644 --- a/pkg/management/postgres/logicalimport/role.go +++ b/pkg/management/postgres/logicalimport/role.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logicalimport @@ -155,8 +158,8 @@ func (rs *roleManager) getRoles(ctx context.Context) ([]Role, error) { "rolcanlogin, rolconnlimit, rolpassword, " + "rolvaliduntil, rolreplication, rolbypassrls, " + "pg_catalog.shobj_description(oid, 'pg_authid') as rolcomment, " + - "rolname = current_user AS is_current_user " + - "FROM pg_authid " + + "rolname = CURRENT_USER AS is_current_user " + + "FROM pg_catalog.pg_authid " + "WHERE oid >= 16384 " + "ORDER BY 2" } else { @@ -166,8 +169,8 @@ func (rs *roleManager) getRoles(ctx context.Context) ([]Role, error) { "rolvaliduntil, rolreplication, " + "false as rolbypassrls, " + "pg_catalog.shobj_description(oid, 'pg_authid') as rolcomment, " + - "rolname = current_user AS is_current_user " + - "FROM pg_authid " + + "rolname = CURRENT_USER AS is_current_user " + + "FROM pg_catalog.pg_authid " + "WHERE oid >= 16384 " + "ORDER BY 2" } diff --git a/pkg/management/postgres/logicalimport/role_test.go b/pkg/management/postgres/logicalimport/role_test.go index 4d2d70d940..5c221f1f66 100644 --- a/pkg/management/postgres/logicalimport/role_test.go +++ b/pkg/management/postgres/logicalimport/role_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
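// Editor's sketch (Go, not part of the patch) of why the role queries above
// filter on oid >= 16384: FirstNormalObjectId (src/include/access/transam.h)
// is the first OID PostgreSQL assigns to user-created objects, so the filter
// keeps only non-system roles. Schema-qualifying the catalog as
// pg_catalog.pg_authid also keeps the query immune to same-named objects
// earlier in search_path.
package main

import "fmt"

const firstNormalObjectID = 16384 // PostgreSQL's FirstNormalObjectId

func main() {
	query := fmt.Sprintf(
		"SELECT rolname FROM pg_catalog.pg_authid WHERE oid >= %d ORDER BY rolname",
		firstNormalObjectID,
	)
	fmt.Println(query)
}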
@@ -12,12 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logicalimport import ( - "context" "fmt" "github.com/DATA-DOG/go-sqlmock" @@ -29,13 +31,12 @@ import ( var _ = Describe("", func() { const inhQuery = "SELECT ur.rolname AS roleid, um.rolname AS member, a.admin_option, ug.rolname AS grantor " + - "FROM pg_auth_members a LEFT JOIN pg_authid ur on ur.oid = a.roleid " + - "LEFT JOIN pg_authid um on um.oid = a.member " + - "LEFT JOIN pg_authid ug on ug.oid = a.grantor " + + "FROM pg_catalog.pg_auth_members a LEFT JOIN pg_catalog.pg_authid ur on ur.oid = a.roleid " + + "LEFT JOIN pg_catalog.pg_authid um on um.oid = a.member " + + "LEFT JOIN pg_catalog.pg_authid ug on ug.oid = a.grantor " + "WHERE ur.oid >= 16384 AND um.oid >= 16384" var ( - ctx context.Context fp fakePooler mock sqlmock.Sqlmock ri []RoleInheritance @@ -43,7 +44,6 @@ var _ = Describe("", func() { ) BeforeEach(func() { - ctx = context.TODO() db, dbMock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) mock = dbMock @@ -66,7 +66,7 @@ var _ = Describe("", func() { Expect(expectationErr).ToNot(HaveOccurred()) }) - It("should clone role inheritance successfully", func() { + It("should clone role inheritance successfully", func(ctx SpecContext) { // Define the RoleInheritance result for getRoleInheritance ri := []RoleInheritance{ { @@ -95,7 +95,7 @@ var _ = Describe("", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should return any error encountered when getting role inheritance", func() { + It("should return any error encountered when getting role inheritance", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") mock.ExpectQuery(inhQuery).WillReturnError(expectedErr) @@ -103,7 +103,7 @@ var _ = Describe("", func() { Expect(err).To(Equal(expectedErr)) }) - It("should import role inheritance successfully", func() { + It("should import role inheritance successfully", func(ctx SpecContext) { query := fmt.Sprintf(`GRANT %s TO %s WITH ADMIN OPTION GRANTED BY %s`, pgx.Identifier{ri[0].RoleID}.Sanitize(), pgx.Identifier{ri[0].Member}.Sanitize(), @@ -117,7 +117,7 @@ var _ = Describe("", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should return the correct role inheritances", func() { + It("should return the correct role inheritances", func(ctx SpecContext) { mock.ExpectQuery(inhQuery). WillReturnRows(sqlmock.NewRows([]string{"roleid", "member", "admin_option", "grantor"}). 
AddRow("role1", "member1", true, "grantor1")) @@ -127,7 +127,7 @@ var _ = Describe("", func() { Expect(ris).To(Equal(ri)) }) - It("should return any error encountered when getting role inheritances", func() { + It("should return any error encountered when getting role inheritances", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") mock.ExpectQuery(inhQuery).WillReturnError(expectedErr) @@ -135,7 +135,7 @@ var _ = Describe("", func() { Expect(err).To(Equal(expectedErr)) }) - It("should return any error encountered when scanning the result", func() { + It("should return any error encountered when scanning the result", func(ctx SpecContext) { mock.ExpectQuery(inhQuery).WillReturnRows(sqlmock.NewRows([]string{"wrongColumnName"}).AddRow("role1")) _, err := rm.getRoleInheritance(ctx) diff --git a/pkg/management/postgres/logicalimport/roleinheritance.go b/pkg/management/postgres/logicalimport/roleinheritance.go index 749ba1c33c..88bfcf4af4 100644 --- a/pkg/management/postgres/logicalimport/roleinheritance.go +++ b/pkg/management/postgres/logicalimport/roleinheritance.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logicalimport @@ -98,10 +101,10 @@ func (rs *roleInheritanceManager) getRoleInheritance(ctx context.Context) ([]Rol "um.rolname AS member, " + "a.admin_option, " + "ug.rolname AS grantor " + - "FROM pg_auth_members a " + - "LEFT JOIN pg_authid ur on ur.oid = a.roleid " + - "LEFT JOIN pg_authid um on um.oid = a.member " + - "LEFT JOIN pg_authid ug on ug.oid = a.grantor " + + "FROM pg_catalog.pg_auth_members a " + + "LEFT JOIN pg_catalog.pg_authid ur on ur.oid = a.roleid " + + "LEFT JOIN pg_catalog.pg_authid um on um.oid = a.member " + + "LEFT JOIN pg_catalog.pg_authid ug on ug.oid = a.grantor " + "WHERE ur.oid >= 16384 AND um.oid >= 16384" rows, err := originDB.Query(query) diff --git a/pkg/management/postgres/logicalimport/roleinheritance_test.go b/pkg/management/postgres/logicalimport/roleinheritance_test.go index c652224755..75c22e2921 100644 --- a/pkg/management/postgres/logicalimport/roleinheritance_test.go +++ b/pkg/management/postgres/logicalimport/roleinheritance_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package logicalimport @@ -66,9 +69,9 @@ var _ = Describe("RoleInheritanceManager", func() { AddRow("role2", "member2", false, nil) query := "SELECT ur\\.rolname AS roleid, um\\.rolname AS member, a\\.admin_option, ug\\.rolname AS grantor " + - "FROM pg_auth_members a LEFT JOIN pg_authid ur on ur\\.oid = a\\.roleid " + - "LEFT JOIN pg_authid um on um\\.oid = a\\.member " + - "LEFT JOIN pg_authid ug on ug\\.oid = a\\.grantor " + + "FROM pg_catalog.pg_auth_members a LEFT JOIN pg_catalog.pg_authid ur on ur\\.oid = a\\.roleid " + + "LEFT JOIN pg_catalog.pg_authid um on um\\.oid = a\\.member " + + "LEFT JOIN pg_catalog.pg_authid ug on ug\\.oid = a\\.grantor " + "WHERE ur\\.oid >= 16384 AND um\\.oid >= 16384" mock.ExpectQuery(query).WillReturnRows(rows) diff --git a/pkg/management/postgres/logicalimport/suite_test.go b/pkg/management/postgres/logicalimport/suite_test.go index 51acce5942..fd3f87b0af 100644 --- a/pkg/management/postgres/logicalimport/suite_test.go +++ b/pkg/management/postgres/logicalimport/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logicalimport diff --git a/pkg/management/postgres/logpipe/CSVReadWriter.go b/pkg/management/postgres/logpipe/CSVReadWriter.go index 606f9b7e79..298d7e7958 100644 --- a/pkg/management/postgres/logpipe/CSVReadWriter.go +++ b/pkg/management/postgres/logpipe/CSVReadWriter.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe @@ -55,7 +58,7 @@ func (r *CSVRecordReadWriter) Read() ([]string, error) { for _, allowedFields := range r.allowedFieldsPerRecord { if len(record) == allowedFields { - r.Reader.FieldsPerRecord = allowedFields + r.FieldsPerRecord = allowedFields return record, nil } } diff --git a/pkg/management/postgres/logpipe/error.go b/pkg/management/postgres/logpipe/error.go index 6cb3ea9571..5321e69cac 100644 --- a/pkg/management/postgres/logpipe/error.go +++ b/pkg/management/postgres/logpipe/error.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
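// Editor's sketch (Go, not part of the patch) of why the CSVReadWriter hunk
// above can write r.FieldsPerRecord instead of r.Reader.FieldsPerRecord:
// the struct embeds *csv.Reader, so the Reader's fields are promoted and both
// selectors name the same field. recordReader below is a reduced stand-in for
// the real type.
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

type recordReader struct {
	*csv.Reader // embedded: fields such as FieldsPerRecord are promoted
}

func main() {
	r := recordReader{csv.NewReader(strings.NewReader("a,b,c\n"))}
	r.FieldsPerRecord = 3 // identical to writing r.Reader.FieldsPerRecord
	record, err := r.Read()
	fmt.Println(record, err)
}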
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe diff --git a/pkg/management/postgres/logpipe/linelogpipe.go b/pkg/management/postgres/logpipe/linelogpipe.go index 1e8b716772..53bf53d2fc 100644 --- a/pkg/management/postgres/logpipe/linelogpipe.go +++ b/pkg/management/postgres/logpipe/linelogpipe.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package logpipe implements reading csv logs from PostgreSQL logging_collector diff --git a/pkg/management/postgres/logpipe/loggingCollector.go b/pkg/management/postgres/logpipe/loggingCollector.go index 59f2b3d8f5..6d6fb34ae3 100644 --- a/pkg/management/postgres/logpipe/loggingCollector.go +++ b/pkg/management/postgres/logpipe/loggingCollector.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe diff --git a/pkg/management/postgres/logpipe/loggingCollector_test.go b/pkg/management/postgres/logpipe/loggingCollector_test.go index 8d7df45ce0..85d296d741 100644 --- a/pkg/management/postgres/logpipe/loggingCollector_test.go +++ b/pkg/management/postgres/logpipe/loggingCollector_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe diff --git a/pkg/management/postgres/logpipe/logpipe.go b/pkg/management/postgres/logpipe/logpipe.go index 81b6b2543a..0a7b63e68a 100644 --- a/pkg/management/postgres/logpipe/logpipe.go +++ b/pkg/management/postgres/logpipe/logpipe.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package logpipe implements reading csv logs from PostgreSQL logging_collector diff --git a/pkg/management/postgres/logpipe/logpipe_test.go b/pkg/management/postgres/logpipe/logpipe_test.go index e05bd48708..1f62200338 100644 --- a/pkg/management/postgres/logpipe/logpipe_test.go +++ b/pkg/management/postgres/logpipe/logpipe_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe import ( - "context" "errors" "os" "strings" @@ -39,9 +41,7 @@ func (writer *SpyRecordWriter) Write(record NamedRecord) { var _ = Describe("CSV file reader", func() { When("given CSV logs from logging_collector", func() { - ctx := context.TODO() - - It("can read multiple CSV lines", func() { + It("can read multiple CSV lines", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines.csv") defer func() { _ = f.Close() @@ -57,23 +57,7 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read multiple CSV lines on PostgreSQL version <= 12", func() { - f, err := os.Open("testdata/two_lines_12.csv") - defer func() { - _ = f.Close() - }() - Expect(err).ToNot(HaveOccurred()) - - spy := SpyRecordWriter{} - p := LogPipe{ - record: &LoggingRecord{}, - fieldsValidator: LogFieldValidator, - } - Expect(p.streamLogFromCSVFile(ctx, f, &spy)).To(Succeed()) - Expect(spy.records).To(HaveLen(2)) - }) - - It("can read multiple CSV lines on PostgreSQL version == 14", func() { + It("can read multiple CSV lines on PostgreSQL version == 14", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines_14.csv") defer func() { _ = f.Close() @@ -89,7 +73,7 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read pgAudit CSV lines", func() { + It("can read pgAudit CSV lines", func(ctx SpecContext) { f, err := os.Open("testdata/pgaudit.csv") defer func() { _ = f.Close() @@ -110,7 +94,7 @@ var _ = Describe("CSV file reader", func() { Expect(err).ShouldNot(HaveOccurred()) input := strings.TrimRight(string(inputBuffer), " \n") - It("there are too many fields", func() { + It("there are too many fields", func(ctx SpecContext) { spy := SpyRecordWriter{} longerInput := input + ",test" @@ -128,7 +112,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(FieldsPerRecord13 + 1)) }) - It("there are not enough fields", func() { + It("there are not enough fields", func(ctx SpecContext) { spy := SpyRecordWriter{} shorterInput := "one,two,three" @@ -146,7 +130,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(3)) }) - It("there is a trailing comma", func() { + It("there is a trailing comma", func(ctx SpecContext) { spy := SpyRecordWriter{} trailingCommaInput := input + "," @@ -164,7 +148,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(FieldsPerRecord13 + 1)) 
}) - It("there is a wrong number of fields on a line that is not the first", func() { + It("there is a wrong number of fields on a line that is not the first", func(ctx SpecContext) { spy := SpyRecordWriter{} longerInput := input + "\none,two,three" @@ -183,7 +167,7 @@ var _ = Describe("CSV file reader", func() { }) }) - It("correctly handles an empty stream", func() { + It("correctly handles an empty stream", func(ctx SpecContext) { spy := SpyRecordWriter{} p := LogPipe{ record: &LoggingRecord{}, diff --git a/pkg/management/postgres/logpipe/pgaudit.go b/pkg/management/postgres/logpipe/pgaudit.go index 45a6bc8ed3..c46d14bb87 100644 --- a/pkg/management/postgres/logpipe/pgaudit.go +++ b/pkg/management/postgres/logpipe/pgaudit.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe @@ -64,7 +67,7 @@ func (r *PgAuditLoggingDecorator) FromCSV(content []string) NamedRecord { return r.LoggingRecord } - _, err := r.CSVReadWriter.Write([]byte(record)) + _, err := r.Write([]byte(record)) if err != nil { return r.LoggingRecord } @@ -73,7 +76,7 @@ func (r *PgAuditLoggingDecorator) FromCSV(content []string) NamedRecord { return r.LoggingRecord } - r.LoggingRecord.Message = "" + r.Message = "" r.Audit.fromCSV(auditContent) return r } diff --git a/pkg/management/postgres/logpipe/pgaudit_test.go b/pkg/management/postgres/logpipe/pgaudit_test.go index 5cf9729162..f8a8d17ef9 100644 --- a/pkg/management/postgres/logpipe/pgaudit_test.go +++ b/pkg/management/postgres/logpipe/pgaudit_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe diff --git a/pkg/management/postgres/logpipe/record.go b/pkg/management/postgres/logpipe/record.go index 217da099d4..dcd221c7e0 100644 --- a/pkg/management/postgres/logpipe/record.go +++ b/pkg/management/postgres/logpipe/record.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package logpipe diff --git a/pkg/management/postgres/logpipe/suite_test.go b/pkg/management/postgres/logpipe/suite_test.go index a37b1b0aca..86dee363e2 100644 --- a/pkg/management/postgres/logpipe/suite_test.go +++ b/pkg/management/postgres/logpipe/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe diff --git a/pkg/management/postgres/logpipe/testdata/two_lines_12.csv b/pkg/management/postgres/logpipe/testdata/two_lines_12.csv deleted file mode 100644 index 84b39d6ef1..0000000000 --- a/pkg/management/postgres/logpipe/testdata/two_lines_12.csv +++ /dev/null @@ -1,2 +0,0 @@ -2021-05-10 06:25:24.239 UTC,,,9298,,601c20b5.2452,61853,,2021-02-04 16:28:37 UTC,,0,LOG,00000,"checkpoint starting: time",,,,,,,,,"" -2021-05-10 06:25:30.200 UTC,,,9298,,601c20b5.2452,61854,,2021-02-04 16:28:37 UTC,,0,LOG,00000,"checkpoint complete: wrote 59 buffers (0.0%); 0 WAL file(s) added, 0 removed, 1 recycled; write=5.937 s, sync=0.004 s, total=5.961 s; sync files=7, longest=0.002 s, average=0.000 s; distance=16415 kB, estimate=16910 kB",,,,,,,,,"" \ No newline at end of file diff --git a/pkg/management/postgres/logpipe/writer.go b/pkg/management/postgres/logpipe/writer.go index ec323ebfaa..07e1f91d53 100644 --- a/pkg/management/postgres/logpipe/writer.go +++ b/pkg/management/postgres/logpipe/writer.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logpipe diff --git a/pkg/management/postgres/metrics/collector.go b/pkg/management/postgres/metrics/collector.go index 4e8fb282cb..6dc1253491 100644 --- a/pkg/management/postgres/metrics/collector.go +++ b/pkg/management/postgres/metrics/collector.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // This code is inspired on [postgres_exporter](https://github.com/prometheus-community/postgres_exporter) @@ -24,11 +27,17 @@ import ( "fmt" "path" "regexp" + "slices" + "time" "github.com/blang/semver" + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/prometheus/client_golang/prometheus" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/metrics/histogram" postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" @@ -543,3 +552,120 @@ func (c QueryCollector) collectHistogramMetric( } ch <- metric } + +// PluginCollector is the interface for collecting metrics from plugins +type PluginCollector interface { + // Collect collects the metrics from the plugins + Collect(ctx context.Context, ch chan<- prometheus.Metric, cluster *apiv1.Cluster) error + // Describe describes the metrics from the plugins + Describe(ctx context.Context, ch chan<- *prometheus.Desc, cluster *apiv1.Cluster) +} + +type pluginCollector struct { + pluginRepository repository.Interface +} + +// NewPluginCollector creates a new PluginCollector that collects metrics from plugins +func NewPluginCollector( + pluginRepository repository.Interface, +) PluginCollector { + return &pluginCollector{pluginRepository: pluginRepository} +} + +func (p *pluginCollector) Describe(ctx context.Context, ch chan<- *prometheus.Desc, cluster *apiv1.Cluster) { + contextLogger := log.FromContext(ctx).WithName("plugin_metrics_describe") + + if len(p.getEnabledPluginNames(cluster)) == 0 { + contextLogger.Trace("No plugins enabled for metrics collection") + return + } + + cli, err := p.getClient(ctx, cluster) + if err != nil { + contextLogger.Error(err, "failed to get plugin client") + return + } + defer cli.Close(ctx) + + pluginsMetrics, err := cli.GetMetricsDefinitions(ctx, cluster) + if err != nil { + contextLogger.Error(err, "failed to get plugin metrics") + return + } + + for _, metric := range pluginsMetrics { + ch <- metric.Desc + } +} + +func (p *pluginCollector) Collect(ctx context.Context, ch chan<- prometheus.Metric, cluster *apiv1.Cluster) error { + contextLogger := log.FromContext(ctx).WithName("plugin_metrics_collect") + + if len(p.getEnabledPluginNames(cluster)) == 0 { + contextLogger.Trace("No plugins enabled for metrics collection") + return nil + } + + cli, err := p.getClient(ctx, cluster) + if err != nil { + return fmt.Errorf("failed to get plugin client: %w", err) + } + defer cli.Close(ctx) + + definitions, err := cli.GetMetricsDefinitions(ctx, cluster) + if err != nil { + return fmt.Errorf("failed to get plugin metrics during collect: %w", err) + } + + res, err := cli.CollectMetrics(ctx, cluster) + if err != nil { + return fmt.Errorf("failed to collect metrics from plugins: %w", err) + } + + return sendPluginMetrics(definitions, res, ch) +} + +func sendPluginMetrics( + definitions pluginClient.PluginMetricDefinitions, + metrics []*metrics.CollectMetric, + ch chan<- prometheus.Metric, +) error { + for _, metric := range metrics { + definition := definitions.Get(metric.FqName) + if definition == nil { + return fmt.Errorf("metric definition not found for fqName: %s", metric.FqName) + } + + m, err := 
prometheus.NewConstMetric(definition.Desc, definition.ValueType, metric.Value, metric.VariableLabels...) + if err != nil { + return fmt.Errorf("failed to create metric %s: %w", metric.FqName, err) + } + ch <- m + } + return nil +} + +func (p *pluginCollector) getClient(ctx context.Context, cluster *apiv1.Cluster) (pluginClient.Client, error) { + pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second) + defer cancelPluginLoading() + + return pluginClient.WithPlugins( + pluginLoadingContext, + p.pluginRepository, + p.getEnabledPluginNames(cluster)..., + ) +} + +func (p *pluginCollector) getEnabledPluginNames(cluster *apiv1.Cluster) []string { + enabledPluginNames := cluster.GetInstanceEnabledPluginNames() + + // for backward compatibility, we also add the WAL archive plugin that initially didn't require + // INSTANCE_SIDECAR_INJECTION + if pluginWAL := cluster.GetEnabledWALArchivePluginName(); pluginWAL != "" { + if !slices.Contains(enabledPluginNames, pluginWAL) { + enabledPluginNames = append(enabledPluginNames, pluginWAL) + } + } + + return enabledPluginNames +} diff --git a/pkg/management/postgres/metrics/collector_test.go b/pkg/management/postgres/metrics/collector_test.go index 8ce178d225..acc2a71ab7 100644 --- a/pkg/management/postgres/metrics/collector_test.go +++ b/pkg/management/postgres/metrics/collector_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,18 +13,23 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metrics import ( + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" "github.com/prometheus/client_golang/prometheus" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) -var _ = Describe("Set default queries", func() { +var _ = Describe("Set default queries", Ordered, func() { q := NewQueriesCollector("test", nil, "db") It("does assign nothing with empty default queries", func() { @@ -147,3 +153,126 @@ var _ = Describe("QueryCollector tests", func() { }) }) }) + +var _ = Describe("sendPluginMetrics tests", func() { + It("should successfully send metrics when definitions and metrics match", func() { + ch := make(chan prometheus.Metric, 10) + desc := prometheus.NewDesc("test_metric", "test description", []string{"label1"}, nil) + definitions := pluginClient.PluginMetricDefinitions{ + pluginClient.PluginMetricDefinition{ + FqName: "test_metric", + Desc: desc, + ValueType: prometheus.CounterValue, + }, + } + + testMetrics := []*metrics.CollectMetric{ + { + FqName: "test_metric", + Value: 42.0, + VariableLabels: []string{"value1"}, + }, + } + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).ToNot(HaveOccurred()) + Expect(ch).To(HaveLen(1)) + + // Verify the metric was sent + metric := <-ch + Expect(metric.Desc()).To(Equal(desc)) + }) + + It("should return error when metric definition is not found", func() { + ch := make(chan prometheus.Metric, 10) + definitions := pluginClient.PluginMetricDefinitions{} + testMetrics := []*metrics.CollectMetric{ + { + FqName: "missing_metric", + Value: 42.0, + VariableLabels: []string{"value1"}, + }, + } + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("metric definition not found for fqName: missing_metric")) + Expect(ch).To(BeEmpty()) + }) + + It("should return error when prometheus metric creation fails", func() { + ch := make(chan prometheus.Metric, 10) + desc := prometheus.NewDesc("test_metric", "test description", []string{"label1", "label2"}, nil) + definitions := pluginClient.PluginMetricDefinitions{ + pluginClient.PluginMetricDefinition{ + FqName: "test_metric", + Desc: desc, + ValueType: prometheus.CounterValue, + }, + } + + // Create metric with wrong number of labels (should cause NewConstMetric to fail) + testMetrics := []*metrics.CollectMetric{ + { + FqName: "test_metric", + Value: 42.0, + VariableLabels: []string{"value1"}, // Only one label, but desc expects two + }, + } + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create metric test_metric")) + Expect(ch).To(BeEmpty()) + }) + + It("should handle multiple metrics successfully", func() { + ch := make(chan prometheus.Metric, 10) + desc1 := prometheus.NewDesc("metric_one", "first metric", []string{"label1"}, nil) + desc2 := prometheus.NewDesc("metric_two", "second metric", []string{"label2"}, nil) + definitions := pluginClient.PluginMetricDefinitions{ + pluginClient.PluginMetricDefinition{ + FqName: "metric_one", + Desc: desc1, + ValueType: prometheus.CounterValue, + }, + pluginClient.PluginMetricDefinition{ + FqName: "metric_two", + Desc: desc2, + ValueType: prometheus.GaugeValue, + }, + } + + testMetrics := []*metrics.CollectMetric{ + { + FqName: "metric_one", + Value: 10.0, + VariableLabels: []string{"value1"}, + }, + { + FqName: "metric_two", + Value: 20.0, + VariableLabels: []string{"value2"}, + }, + } + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).ToNot(HaveOccurred()) + Expect(ch).To(HaveLen(2)) + }) + + It("should handle empty metrics slice", func() { + ch := make(chan prometheus.Metric, 10) + 
definitions := pluginClient.PluginMetricDefinitions{} + var testMetrics []*metrics.CollectMetric + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).ToNot(HaveOccurred()) + Expect(ch).To(BeEmpty()) + }) +}) diff --git a/pkg/management/postgres/metrics/histogram/histogram.go b/pkg/management/postgres/metrics/histogram/histogram.go index 3223a188eb..4cb279176d 100644 --- a/pkg/management/postgres/metrics/histogram/histogram.go +++ b/pkg/management/postgres/metrics/histogram/histogram.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // This code is inspired on [postgres_exporter](https://github.com/prometheus-community/postgres_exporter) diff --git a/pkg/management/postgres/metrics/mapping_test.go b/pkg/management/postgres/metrics/mapping_test.go index 4b3b51dbda..08c120b024 100644 --- a/pkg/management/postgres/metrics/mapping_test.go +++ b/pkg/management/postgres/metrics/mapping_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metrics diff --git a/pkg/management/postgres/metrics/mappings.go b/pkg/management/postgres/metrics/mappings.go index 814ab520f2..20294c280f 100644 --- a/pkg/management/postgres/metrics/mappings.go +++ b/pkg/management/postgres/metrics/mappings.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metrics diff --git a/pkg/management/postgres/metrics/parser.go b/pkg/management/postgres/metrics/parser.go index aa36e6454f..2d598b11e6 100644 --- a/pkg/management/postgres/metrics/parser.go +++ b/pkg/management/postgres/metrics/parser.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
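// Editor's sketch (Go, not part of the patch) of the invariant the
// sendPluginMetrics tests above exercise: prometheus.NewConstMetric only
// succeeds when the number of variable label values matches the label
// dimensions declared in the Desc, which is exactly the error path reported
// as "failed to create metric".
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("test_metric", "test description", []string{"label1"}, nil)

	// One label declared, one value supplied: succeeds.
	m, err := prometheus.NewConstMetric(desc, prometheus.CounterValue, 42.0, "value1")
	fmt.Println(m != nil, err)

	// One label declared, no value supplied: fails.
	_, err = prometheus.NewConstMetric(desc, prometheus.CounterValue, 42.0)
	fmt.Println(err)
}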
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package metrics enables to expose a set of metrics and collectors on a given postgres instance diff --git a/pkg/management/postgres/metrics/parser_test.go b/pkg/management/postgres/metrics/parser_test.go index c185aede05..1ad1d4659a 100644 --- a/pkg/management/postgres/metrics/parser_test.go +++ b/pkg/management/postgres/metrics/parser_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metrics diff --git a/pkg/management/postgres/metrics/suite_test.go b/pkg/management/postgres/metrics/suite_test.go index d5b483093d..357af3edcb 100644 --- a/pkg/management/postgres/metrics/suite_test.go +++ b/pkg/management/postgres/metrics/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metrics diff --git a/pkg/management/postgres/pidfile.go b/pkg/management/postgres/pidfile.go index d5f1b4f7b2..41471e73a1 100644 --- a/pkg/management/postgres/pidfile.go +++ b/pkg/management/postgres/pidfile.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/management/postgres/pidfile_test.go b/pkg/management/postgres/pidfile_test.go index 2fddf281d9..460a61b13a 100644 --- a/pkg/management/postgres/pidfile_test.go +++ b/pkg/management/postgres/pidfile_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -49,7 +52,7 @@ var _ = Describe("the detection of a postmaster process using the pid file", fun instance := NewInstance() instance.PgData = pgdata instance.SocketDirectory = socketDir - process, err := instance.CheckForExistingPostmaster(postgresName) + process, err := instance.CheckForExistingPostmaster(GetPostgresExecutableName()) Expect(err).ShouldNot(HaveOccurred()) Expect(process).To(BeNil()) }) @@ -67,7 +70,7 @@ var _ = Describe("the detection of a postmaster process using the pid file", fun err = os.WriteFile(filepath.Join(socketDir, ".s.PGSQL.5432.lock"), []byte("1234"), 0o400) Expect(err).ShouldNot(HaveOccurred()) - process, err := instance.CheckForExistingPostmaster(postgresName) + process, err := instance.CheckForExistingPostmaster(GetPostgresExecutableName()) Expect(err).ShouldNot(HaveOccurred()) Expect(process).To(BeNil()) diff --git a/pkg/management/postgres/pool/connection.go b/pkg/management/postgres/pool/connection.go index ddfdb2181f..c642733ba9 100644 --- a/pkg/management/postgres/pool/connection.go +++ b/pkg/management/postgres/pool/connection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pool diff --git a/pkg/management/postgres/pool/pool.go b/pkg/management/postgres/pool/pool.go index f1ec6e5a8f..5125b23910 100644 --- a/pkg/management/postgres/pool/pool.go +++ b/pkg/management/postgres/pool/pool.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
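// Editor's sketch (Go, not part of the patch) of the general idea behind the
// CheckForExistingPostmaster calls in the tests above: read the PID recorded
// in postmaster.pid and probe whether a matching process is still alive
// before starting a new postmaster. Heavily simplified; the file path is
// hypothetical and the real code also verifies the executable name.
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"
)

func main() {
	const pidFile = "/var/lib/postgresql/data/pgdata/postmaster.pid" // hypothetical path
	data, err := os.ReadFile(pidFile)
	if err != nil {
		fmt.Println("no pid file, nothing to check:", err)
		return
	}
	firstLine := strings.SplitN(string(data), "\n", 2)[0]
	pid, err := strconv.Atoi(strings.TrimSpace(firstLine))
	if err != nil {
		fmt.Println("malformed pid file:", err)
		return
	}
	proc, _ := os.FindProcess(pid) // always succeeds on Unix
	// Signal 0 delivers nothing but reports whether the process exists.
	if err := proc.Signal(syscall.Signal(0)); err != nil {
		fmt.Println("stale pid file, process", pid, "is gone")
		return
	}
	fmt.Println("postmaster", pid, "is still running")
}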
+ +SPDX-License-Identifier: Apache-2.0 */ // Package pool contains an implementation of a connection pool to multiple @@ -21,6 +24,7 @@ package pool import ( "database/sql" "fmt" + "sync" // this is needed to correctly open the sql connection with the pgx driver _ "github.com/jackc/pgx/v5/stdlib" @@ -48,7 +52,8 @@ type ConnectionPool struct { connectionProfile ConnectionProfile // A map of connection for every used database - connectionMap map[string]*sql.DB + connectionMap map[string]*sql.DB + connectionMapMutex sync.Mutex } // NewPostgresqlConnectionPool creates a new connectionMap of connections given @@ -75,6 +80,8 @@ func newConnectionPool(baseConnectionString string, connectionProfile Connection // Connection gets the connection for the given database func (pool *ConnectionPool) Connection(dbname string) (*sql.DB, error) { + pool.connectionMapMutex.Lock() + defer pool.connectionMapMutex.Unlock() if result, ok := pool.connectionMap[dbname]; ok { return result, nil } @@ -90,6 +97,9 @@ func (pool *ConnectionPool) Connection(dbname string) (*sql.DB, error) { // ShutdownConnections closes every database connection func (pool *ConnectionPool) ShutdownConnections() { + pool.connectionMapMutex.Lock() + defer pool.connectionMapMutex.Unlock() + for _, db := range pool.connectionMap { _ = db.Close() } @@ -97,7 +107,7 @@ func (pool *ConnectionPool) ShutdownConnections() { pool.connectionMap = make(map[string]*sql.DB) } -// newConnection creates a database connection connectionMap, connecting via +// newConnection creates a database connection, connecting via // Unix domain socket to a database with a certain name func (pool *ConnectionPool) newConnection(dbname string) (*sql.DB, error) { dsn := pool.GetDsn(dbname) diff --git a/pkg/management/postgres/pool/pool_test.go b/pkg/management/postgres/pool/pool_test.go index 3d3016d8ed..44345be63a 100644 --- a/pkg/management/postgres/pool/pool_test.go +++ b/pkg/management/postgres/pool/pool_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pool diff --git a/pkg/management/postgres/pool/profiles.go b/pkg/management/postgres/pool/profiles.go index ca441a49d0..f6b4339f12 100644 --- a/pkg/management/postgres/pool/profiles.go +++ b/pkg/management/postgres/pool/profiles.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
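// Editor's sketch (Go, not part of the patch) of the locking pattern the
// ConnectionPool hunk above introduces: a single mutex serializes
// Connection() and ShutdownConnections() so concurrent callers cannot race on
// the shared connection map. The map below stores strings instead of *sql.DB
// to keep the example self-contained and runnable.
package main

import (
	"fmt"
	"sync"
)

type connectionPool struct {
	mu          sync.Mutex
	connections map[string]string
}

func (p *connectionPool) connection(dbname string) string {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.connections[dbname]; ok {
		return c
	}
	c := "connection-to-" + dbname
	p.connections[dbname] = c
	return c
}

func main() {
	p := &connectionPool{connections: map[string]string{}}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(p.connection("app")) // safe under concurrent access
		}()
	}
	wg.Wait()
}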
+ +SPDX-License-Identifier: Apache-2.0 */ package pool diff --git a/pkg/management/postgres/pool/suite_test.go b/pkg/management/postgres/pool/suite_test.go index 2a5c4479ce..16377c3b2b 100644 --- a/pkg/management/postgres/pool/suite_test.go +++ b/pkg/management/postgres/pool/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pool diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go index b7e5852364..e1d3ffe3fa 100644 --- a/pkg/management/postgres/probes.go +++ b/pkg/management/postgres/probes.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -21,7 +24,6 @@ import ( "errors" "fmt" "path/filepath" - "runtime" "strings" "github.com/cloudnative-pg/machinery/pkg/fileutils" @@ -36,24 +38,10 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) -// IsServerHealthy check if the instance is healthy -func (instance *Instance) IsServerHealthy() error { - err := PgIsReady() - - // A healthy server can also be actively rejecting connections. - // That's not a problem: it's only the server starting up or shutting - // down. - if errors.Is(err, ErrPgRejectingConnection) { - return nil - } - - return err -} - // GetStatus Extract the status of this PostgreSQL database func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err error) { result = &postgres.PostgresqlStatus{ - Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: instance.PodName}}, + Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: instance.GetPodName()}}, InstanceManagerVersion: versions.Version, MightBeUnavailable: instance.MightBeUnavailable(), } @@ -86,16 +74,30 @@ func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err er return result, err } + // Get the latest configuration hash from the PostgreSQL settings + rowConfigHash := superUserDB.QueryRow( + "SELECT setting FROM pg_catalog.pg_show_all_file_settings() WHERE name = $1", + postgres.CNPGConfigSha256) + if err := rowConfigHash.Scan(&result.LoadedConfigurationHash); err != nil { + if errors.Is(err, sql.ErrNoRows) { + // The applied configuration doesn't contain a CNPGConfigSha256, so it was probably not + // generated by CloudNativePG. This can occur if PostgreSQL starts with an old + // configuration before it is updated by the instance manager. This is not an issue, as + // the correct configuration will be written soon.
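The probes.go hunk above reads the loaded configuration hash back through pg_show_all_file_settings() and deliberately maps sql.ErrNoRows to an empty hash instead of an error. A hedged sketch of that error-handling shape; the DSN and the setting name below are placeholders (the real code uses the postgres.CNPGConfigSha256 constant):

```go
package main

import (
	"database/sql"
	"errors"
	"fmt"

	_ "github.com/jackc/pgx/v5/stdlib"
)

// loadSetting returns the value of a file-level setting, or "" when the
// setting is absent; any other error is propagated to the caller.
func loadSetting(db *sql.DB, name string) (string, error) {
	var value string
	row := db.QueryRow(
		"SELECT setting FROM pg_catalog.pg_show_all_file_settings() WHERE name = $1",
		name)
	if err := row.Scan(&value); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return "", nil // not an error: the config was not written by us yet
		}
		return "", err
	}
	return value, nil
}

func main() {
	db, err := sql.Open("pgx", "host=/tmp dbname=postgres") // placeholder DSN
	if err != nil {
		panic(err)
	}
	defer db.Close()
	v, err := loadSetting(db, "cnpg.config_sha256") // illustrative setting name
	fmt.Println(v, err)
}
```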
+ result.LoadedConfigurationHash = "" + } else { + return result, err + } + } + row := superUserDB.QueryRow( `SELECT - (pg_control_system()).system_identifier, + (pg_catalog.pg_control_system()).system_identifier, -- True if this is a primary instance - NOT pg_is_in_recovery() as primary, + NOT pg_catalog.pg_is_in_recovery() as primary, -- True if at least one column requires a restart - EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart), - -- The size of database in human readable format - (SELECT pg_size_pretty(SUM(pg_database_size(oid))) FROM pg_database)`) - err = row.Scan(&result.SystemID, &result.IsPrimary, &result.PendingRestart, &result.TotalInstanceSize) + EXISTS(SELECT 1 FROM pg_catalog.pg_settings WHERE pending_restart)`) + err = row.Scan(&result.SystemID, &result.IsPrimary, &result.PendingRestart) if err != nil { return result, err } @@ -112,7 +114,7 @@ func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err er return result, err } - result.InstanceArch = runtime.GOARCH + result.InstanceArch = instance.GetArchitecture() result.ExecutableHash, err = executablehash.Get() if err != nil { @@ -183,7 +185,7 @@ FROM SELECT name, setting as current_setting, boot_val as default_setting - FROM pg_settings + FROM pg_catalog.pg_settings WHERE pending_restart ) pending_settings LEFT OUTER JOIN @@ -194,7 +196,7 @@ LEFT OUTER JOIN setting as new_setting, rank() OVER (PARTITION BY name ORDER BY seqno DESC) as rank, applied - FROM pg_file_settings + FROM pg_catalog.pg_file_settings ) c WHERE rank = 1 AND not applied ) file_settings @@ -277,9 +279,9 @@ func (instance *Instance) fillBasebackupStats( var basebackupList []postgres.PgStatBasebackup rows, err := superUserDB.Query(`SELECT - usename, - application_name, - backend_start, + usename, + application_name, + backend_start, phase, COALESCE(backup_total, 0) AS backup_total, COALESCE(backup_streamed, 0) AS backup_streamed, @@ -287,8 +289,8 @@ func (instance *Instance) fillBasebackupStats( COALESCE(pg_size_pretty(backup_streamed), '') AS backup_streamed_pretty, COALESCE(tablespaces_total, 0) AS tablespaces_total, COALESCE(tablespaces_streamed, 0) AS tablespaces_streamed - FROM pg_stat_progress_basebackup b - JOIN pg_stat_activity a USING (pid) + FROM pg_catalog.pg_stat_progress_basebackup b + JOIN pg_catalog.pg_stat_activity a USING (pid) WHERE application_name ~ '-join$' ORDER BY 1, 2`) if err != nil { @@ -337,9 +339,9 @@ func (instance *Instance) fillStatusFromPrimary(result *postgres.PostgresqlStatu ` SELECT (SELECT COALESCE(last_archived_wal, '') FROM pg_catalog.pg_stat_archiver), - pg_walfile_name(pg_current_wal_lsn()) as current_wal, - pg_current_wal_lsn(), - (SELECT timeline_id FROM pg_control_checkpoint()) as timeline_id + pg_catalog.pg_walfile_name(pg_catalog.pg_current_wal_lsn()) as current_wal, + pg_catalog.pg_current_wal_lsn(), + (SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()) as timeline_id `) err = row.Scan(&result.LastArchivedWAL, &result.CurrentWAL, @@ -388,7 +390,7 @@ func (instance *Instance) fillReplicationSlotsStatus(result *postgres.Postgresql } rows, err := superUserDB.Query( - `SELECT + `SELECT slot_name, coalesce(plugin::text, ''), coalesce(slot_type::text, ''), @@ -400,7 +402,7 @@ func (instance *Instance) fillReplicationSlotsStatus(result *postgres.Postgresql coalesce(restart_lsn::text, ''), coalesce(wal_status::text, ''), safe_wal_size - FROM pg_replication_slots`) + FROM pg_catalog.pg_replication_slots`) if err != nil { return err } @@ -470,7 +472,7 @@ func (instance *Instance) 
fillWalStatusFromConnection(result *postgres.Postgresq coalesce(sync_priority, 0) FROM pg_catalog.pg_stat_replication WHERE application_name ~ $1 AND usename = $2`, - fmt.Sprintf("%s-[0-9]+$", instance.ClusterName), + fmt.Sprintf("%s-[0-9]+$", instance.GetClusterName()), v1.StreamingReplicationUser, ) if err != nil { @@ -527,10 +529,10 @@ func (instance *Instance) fillStatusFromReplica(result *postgres.PostgresqlStatu // replicas row := superUserDB.QueryRow( "SELECT " + - "(SELECT timeline_id FROM pg_control_checkpoint()), " + - "COALESCE(pg_last_wal_receive_lsn()::varchar, ''), " + - "COALESCE(pg_last_wal_replay_lsn()::varchar, ''), " + - "pg_is_wal_replay_paused()") + "(SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()), " + + "COALESCE(pg_catalog.pg_last_wal_receive_lsn()::varchar, ''), " + + "COALESCE(pg_catalog.pg_last_wal_replay_lsn()::varchar, ''), " + + "pg_catalog.pg_is_wal_replay_paused()") if err := row.Scan(&result.TimeLineID, &result.ReceivedLsn, &result.ReplayLsn, &result.ReplayPaused); err != nil { return err } @@ -561,7 +563,7 @@ func (instance *Instance) IsWALReceiverActive() (bool, error) { return false, err } - row := superUserDB.QueryRow("SELECT COUNT(*) FROM pg_stat_wal_receiver") + row := superUserDB.QueryRow("SELECT COUNT(*) FROM pg_catalog.pg_stat_wal_receiver") err = row.Scan(&result) if err != nil { return false, err @@ -570,7 +572,7 @@ func (instance *Instance) IsWALReceiverActive() (bool, error) { return result, nil } -// PgStatWal is a representation of the pg_stat_wal table +// PgStatWal is a representation of the pg_stat_wal view, introduced in PostgreSQL 14. type PgStatWal struct { WalRecords int64 WalFpi int64 @@ -583,7 +585,7 @@ type PgStatWal struct { StatsReset string } -// TryGetPgStatWAL retrieves pg_wal_stat on pg version 14 and further +// TryGetPgStatWAL retrieves pg_stat_wal on PostgreSQL 14 and later func (instance *Instance) TryGetPgStatWAL() (*PgStatWal, error) { version, err := instance.GetPgVersion() if err != nil || version.Major < 14 { @@ -595,31 +597,56 @@ func (instance *Instance) TryGetPgStatWAL() (*PgStatWal, error) { return nil, err } + // Since PostgreSQL 18, `wal_write`, `wal_sync`, `wal_write_time` and + // `wal_sync_time` have been removed.
+ // See https://github.com/postgres/postgres/commit/2421e9a51d20bb83154e54a16ce628f9249fa907 var pgWalStat PgStatWal - row := superUserDB.QueryRow( - `SELECT - wal_records, + if version.Major < 18 { + row := superUserDB.QueryRow( + `SELECT + wal_records, + wal_fpi, + wal_bytes, + wal_buffers_full, + wal_write, + wal_sync, + wal_write_time, + wal_sync_time, + stats_reset + FROM pg_catalog.pg_stat_wal`) + if err := row.Scan( + &pgWalStat.WalRecords, + &pgWalStat.WalFpi, + &pgWalStat.WalBytes, + &pgWalStat.WALBuffersFull, + &pgWalStat.WalWrite, + &pgWalStat.WalSync, + &pgWalStat.WalWriteTime, + &pgWalStat.WalSyncTime, + &pgWalStat.StatsReset, + ); err != nil { + return nil, err + } + } + + if version.Major >= 18 { + row := superUserDB.QueryRow( + `SELECT + wal_records, wal_fpi, wal_bytes, wal_buffers_full, - wal_write, - wal_sync, - wal_write_time, - wal_sync_time, stats_reset - FROM pg_stat_wal`) - if err := row.Scan( - &pgWalStat.WalRecords, - &pgWalStat.WalFpi, - &pgWalStat.WalBytes, - &pgWalStat.WALBuffersFull, - &pgWalStat.WalWrite, - &pgWalStat.WalSync, - &pgWalStat.WalWriteTime, - &pgWalStat.WalSyncTime, - &pgWalStat.StatsReset, - ); err != nil { - return nil, err + FROM pg_catalog.pg_stat_wal`) + if err := row.Scan( + &pgWalStat.WalRecords, + &pgWalStat.WalFpi, + &pgWalStat.WalBytes, + &pgWalStat.WALBuffersFull, + &pgWalStat.StatsReset, + ); err != nil { + return nil, err + } } return &pgWalStat, nil diff --git a/pkg/management/postgres/probes_test.go b/pkg/management/postgres/probes_test.go index 514949c65a..627823e784 100644 --- a/pkg/management/postgres/probes_test.go +++ b/pkg/management/postgres/probes_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -90,18 +93,10 @@ var _ = Describe("probes", func() { }) Context("Fill basebackup stats", func() { - It("does nothing in case of that major version is less than 13 ", func() { - instance := &Instance{ - pgVersion: &semver.Version{Major: 12}, - } - Expect(instance.fillBasebackupStats(nil, nil)).To(Succeed()) - }) - It("set the information", func() { - instance := &Instance{ + instance := (&Instance{ pgVersion: &semver.Version{Major: 13}, - PodName: "test-1", - } + }).WithPodName("test-1") status := &postgres.PostgresqlStatus{ IsPrimary: false, } diff --git a/pkg/management/postgres/promote.go b/pkg/management/postgres/promote.go index 7a4440f5ea..0eed1ac216 100644 --- a/pkg/management/postgres/promote.go +++ b/pkg/management/postgres/promote.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
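The TryGetPgStatWAL rewrite above issues one of two column lists depending on the server major version, since PostgreSQL 18 dropped wal_write, wal_sync, wal_write_time and wal_sync_time from pg_stat_wal. The version gate can be summarised as follows (a sketch, not the project's code):

```go
package main

import "fmt"

// walStatColumns returns the pg_stat_wal columns that exist for a given
// PostgreSQL major version; the view itself only exists from version 14.
func walStatColumns(major uint64) []string {
	common := []string{"wal_records", "wal_fpi", "wal_bytes", "wal_buffers_full"}
	if major < 18 {
		// Removed in PostgreSQL 18.
		common = append(common, "wal_write", "wal_sync", "wal_write_time", "wal_sync_time")
	}
	return append(common, "stats_reset")
}

func main() {
	fmt.Println(walStatColumns(17)) // nine columns
	fmt.Println(walStatColumns(18)) // five columns
}
```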
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/management/postgres/readiness/readiness.go b/pkg/management/postgres/readiness/readiness.go deleted file mode 100644 index fc2d0f748b..0000000000 --- a/pkg/management/postgres/readiness/readiness.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package readiness - -import ( - "context" - "errors" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" -) - -// ErrStreamingReplicaNotConnected is raised for streaming replicas that never connected to its primary -var ErrStreamingReplicaNotConnected = errors.New("streaming replica was never connected to the primary node") - -// Data is the readiness checker structure -type Data struct { - instance *postgres.Instance - - streamingReplicaValidated bool -} - -// ForInstance creates a readiness checker for a certain instance -func ForInstance(instance *postgres.Instance) *Data { - return &Data{ - instance: instance, - } -} - -// IsServerReady check if the instance is healthy and can really accept connections -func (data *Data) IsServerReady(ctx context.Context) error { - if !data.instance.CanCheckReadiness() { - return errors.New("instance is not ready yet") - } - superUserDB, err := data.instance.GetSuperUserDB() - if err != nil { - return err - } - - // We now check if the database is ready to accept - // connections - if err := superUserDB.PingContext(ctx); err != nil { - return err - } - - // If we already validated this streaming replica, everything - // is fine - if data.streamingReplicaValidated { - return nil - } - - // If this is a streaming replica, meaning that - // primary_conninfo is not empty, we won't declare it ready - // unless it connected one time successfully to its primary. - // - // We check this because a streaming replica that was - // never connected to the primary could be incoherent, - // and we want users to notice this as soon as possible - row := superUserDB.QueryRowContext( - ctx, - ` - SELECT - NOT pg_is_in_recovery() - OR (SELECT coalesce(setting, '') = '' FROM pg_settings WHERE name = 'primary_conninfo') - OR pg_last_wal_replay_lsn() IS NOT NULL - `, - ) - if err := row.Err(); err != nil { - return err - } - - var status bool - if err := row.Scan(&status); err != nil { - return err - } - - if !status { - return ErrStreamingReplicaNotConnected - } - - data.streamingReplicaValidated = true - return nil -} diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index a4cf12afd0..affdeb6197 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
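The deleted readiness.go above gated a streaming replica's readiness on a single boolean query: an instance is acceptable when it is not in recovery, or has an empty primary_conninfo (so it is not a streaming replica), or has already replayed some WAL, so only a replica that never connected to its primary fails. The predicate rendered in plain Go, for illustration:

```go
package main

import "fmt"

// replicaReady mirrors the OR-chain from the removed readiness query:
// a streaming replica that never connected to its primary reports
// pg_last_wal_replay_lsn() IS NULL, and only that combination fails.
func replicaReady(inRecovery bool, primaryConninfo string, replayLSNKnown bool) bool {
	return !inRecovery || primaryConninfo == "" || replayLSNKnown
}

func main() {
	fmt.Println(replicaReady(false, "", false))            // primary: true
	fmt.Println(replicaReady(true, "", false))             // non-streaming standby: true
	fmt.Println(replicaReady(true, "host=primary", true))  // connected replica: true
	fmt.Println(replicaReady(true, "host=primary", false)) // never connected: false
}
```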
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -32,25 +35,29 @@ import ( "time" barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver" - barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities" barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog" barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials" barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer" + barmanUtils "github.com/cloudnative-pg/barman-cloud/pkg/utils" + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" + "github.com/cloudnative-pg/machinery/pkg/envmap" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" - postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/system" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -79,6 +86,7 @@ var ( ) // RestoreSnapshot restores a PostgreSQL cluster from a volumeSnapshot +// nolint:gocognit,gocyclo func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, immediate bool) error { contextLogger := log.FromContext(ctx) @@ -100,10 +108,10 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm // We're creating a new replica of an existing cluster, and the PVCs // have been initialized by a set of VolumeSnapshots. if immediate { - // If the instance will start as a primary, we will enter in the + // If the instance starts as a primary, we will enter in the // same logic attaching an old primary back after a failover. // We don't need that as this instance has never diverged. 
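The RestoreSnapshot hunks that follow pre-build a default recovery configuration pointing restore_command at the instance manager's own wal-restore subcommand, and only fall back to the barman-derived configuration when no recovery plugin is configured. A sketch of assembling such a snippet, with example values standing in for the log path constants:

```go
package main

import "fmt"

// defaultRecoveryConfig renders the two-line recovery configuration used
// when a plugin serves the WALs: promote once the target is reached, and
// delegate WAL fetching to the instance manager binary.
func defaultRecoveryConfig(logPath, logFileName string) string {
	restoreCmd := fmt.Sprintf(
		"/controller/manager wal-restore --log-destination %s/%s.json %%f %%p",
		logPath, logFileName)
	return fmt.Sprintf(
		"recovery_target_action = promote\nrestore_command = '%s'\n",
		restoreCmd)
}

func main() {
	// Example values; the real code takes them from postgresSpec constants.
	fmt.Print(defaultRecoveryConfig("/controller/log", "postgres"))
}
```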
- if err := info.GetInstance().Demote(ctx, cluster); err != nil { + if err := info.GetInstance(nil).Demote(ctx, cluster); err != nil { return fmt.Errorf("error while demoting the instance: %w", err) } return nil @@ -137,19 +145,47 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm } } - backup, env, err := info.createBackupObjectForSnapshotRestore(ctx, cli, cluster) - if err != nil { - return err + var envs []string + restoreCmd := fmt.Sprintf( + "/controller/manager wal-restore --log-destination %s/%s.json %%f %%p", + postgresSpec.LogPath, postgresSpec.LogFileName) + config := fmt.Sprintf( + "recovery_target_action = promote\n"+ + "restore_command = '%s'\n", + restoreCmd) + + // nolint:nestif + if pluginConfiguration := cluster.GetRecoverySourcePlugin(); pluginConfiguration == nil { + envs, config, err = info.createEnvAndConfigForSnapshotRestore(ctx, cli, cluster) + if err != nil { + return err + } } if _, err := info.restoreCustomWalDir(ctx); err != nil { return err } - if err := info.WriteInitialPostgresqlConf(cluster); err != nil { + return info.concludeRestore(ctx, cli, cluster, config, envs) +} + +func (info InitInfo) concludeRestore( + ctx context.Context, + cli client.Client, + cluster *apiv1.Cluster, + config string, + envs []string, +) error { + if err := info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { + return err + } + // We need a migration here, otherwise the server will not start up if + // we recover from a base backup that still keeps its settings in + // postgresql.auto.conf while the override.conf file and its include + // statement are present: we migrate that content over + if _, err := info.GetInstance(cluster).migratePostgresAutoConfFile(ctx); err != nil { return err } - if cluster.IsReplica() { server, ok := cluster.ExternalCluster(cluster.Spec.ReplicaCluster.Source) if !ok {
createBackupObjectForSnapshotRestore( server.BarmanObjectStore, os.Environ()) if err != nil { - return nil, nil, err + return nil, "", err } - return &apiv1.Backup{ + backup := &apiv1.Backup{ Spec: apiv1.BackupSpec{ Cluster: apiv1.LocalObjectReference{ Name: serverName, @@ -223,17 +259,17 @@ func (info InitInfo) createBackupObjectForSnapshotRestore( ServerName: serverName, Phase: apiv1.BackupPhaseCompleted, }, - }, env, nil + } + + config, err := getRestoreWalConfig(ctx, backup) + return env, config, err } // Restore restores a PostgreSQL cluster from a backup into the object storage -func (info InitInfo) Restore(ctx context.Context) error { - typedClient, err := management.NewControllerRuntimeClient() - if err != nil { - return err - } +func (info InitInfo) Restore(ctx context.Context, cli client.Client) error { + contextLogger := log.FromContext(ctx) - cluster, err := info.loadCluster(ctx, typedClient) + cluster, err := info.loadCluster(ctx, cli) if err != nil { return err } @@ -248,67 +284,67 @@ func (info InitInfo) Restore(ctx context.Context) error { info.ApplicationDatabase = cluster.GetApplicationDatabaseName() } - // Before starting the restore we check if the archive destination is safe to use - // otherwise, we stop creating the cluster - err = info.checkBackupDestination(ctx, typedClient, cluster) - if err != nil { - return err - } - - // If we need to download data from a backup, we do it - backup, env, err := info.loadBackup(ctx, typedClient, cluster) - if err != nil { - return err - } + var envs []string + var config string - if err := info.ensureArchiveContainsLastCheckpointRedoWAL(ctx, cluster, env, backup); err != nil { - return err - } + // nolint:nestif + if pluginConfiguration := cluster.GetRecoverySourcePlugin(); pluginConfiguration != nil { + contextLogger.Info("Restore through plugin detected, proceeding...") + res, err := restoreViaPlugin(ctx, cluster, pluginConfiguration) + if err != nil { + return err + } + if res == nil { + return errors.New("empty response from restoreViaPlugin, programmatic error") + } - if err := info.restoreDataDir(ctx, backup, env); err != nil { - return err - } + processEnvironment, err := envmap.ParseEnviron() + if err != nil { + return fmt.Errorf("error while parsing the process environment: %w", err) + } - if _, err := info.restoreCustomWalDir(ctx); err != nil { - return err - } + pluginEnvironment, err := envmap.Parse(res.Envs) + if err != nil { + return fmt.Errorf("error while parsing the plugin environment: %w", err) + } - if err := info.WriteInitialPostgresqlConf(cluster); err != nil { - return err - } - // we need a migration here, otherwise the server will not start up if - // we recover from a base which has postgresql.auto.conf - // the override.conf and include statement is present, what we need to do is to - // migrate the content - if _, err := info.GetInstance().migratePostgresAutoConfFile(ctx); err != nil { - return err - } - if cluster.IsReplica() { - server, ok := cluster.ExternalCluster(cluster.Spec.ReplicaCluster.Source) - if !ok { - return fmt.Errorf("missing external cluster: %v", cluster.Spec.ReplicaCluster.Source) + envs = envmap.Merge(processEnvironment, pluginEnvironment).StringSlice() + config = res.RestoreConfig + } else { + // Before starting the restore we check if the archive destination is safe to use + // otherwise, we stop creating the cluster + err = info.checkBackupDestination(ctx, cli, cluster) + if err != nil { + return err } - connectionString, err := external.ConfigureConnectionToServer( - ctx, 
typedClient, info.Namespace, &server) + // If we need to download data from a backup, we do it + backup, env, err := info.loadBackup(ctx, cli, cluster) if err != nil { return err } - // TODO: Using a replication slot on replica cluster is not supported (yet?) - _, err = UpdateReplicaConfiguration(info.PgData, connectionString, "") - return err - } + if err := info.ensureArchiveContainsLastCheckpointRedoWAL(ctx, cluster, env, backup); err != nil { + return err + } - if err := info.WriteRestoreHbaConf(); err != nil { - return err - } + if err := info.restoreDataDir(ctx, backup, env); err != nil { + return err + } - if err := info.writeRestoreWalConfig(ctx, backup, cluster); err != nil { - return err + if _, err := info.restoreCustomWalDir(ctx); err != nil { + return err + } + + conf, err := getRestoreWalConfig(ctx, backup) + if err != nil { + return err + } + config = conf + envs = env } - return info.ConfigureInstanceAfterRestore(ctx, cluster, env) + return info.concludeRestore(ctx, cli, cluster, config, envs) } func (info InitInfo) ensureArchiveContainsLastCheckpointRedoWAL( @@ -392,6 +428,7 @@ func (info InitInfo) restoreCustomWalDir(ctx context.Context) (bool, error) { // restoreDataDir restores PGDATA from an existing backup func (info InitInfo) restoreDataDir(ctx context.Context, backup *apiv1.Backup, env []string) error { + contextLogger := log.FromContext(ctx) var options []string if backup.Status.EndpointURL != "" { @@ -408,22 +445,22 @@ func (info InitInfo) restoreDataDir(ctx context.Context, backup *apiv1.Backup, e options = append(options, info.PgData) - log.Info("Starting barman-cloud-restore", + contextLogger.Info("Starting barman-cloud-restore", "options", options) - cmd := exec.Command(barmanCapabilities.BarmanCloudRestore, options...) // #nosec G204 + cmd := exec.Command(barmanUtils.BarmanCloudRestore, options...) 
// #nosec G204 cmd.Env = env - err = execlog.RunStreaming(cmd, barmanCapabilities.BarmanCloudRestore) + err = execlog.RunStreaming(cmd, barmanUtils.BarmanCloudRestore) if err != nil { var exitError *exec.ExitError if errors.As(err, &exitError) { - err = barmanCommand.UnmarshalBarmanCloudRestoreExitCode(ctx, exitError.ExitCode()) + err = barmanCommand.UnmarshalBarmanCloudRestoreExitCode(exitError.ExitCode()) } - log.Error(err, "Can't restore backup") + contextLogger.Error(err, "Can't restore backup") return err } - log.Info("Restore completed") + contextLogger.Info("Restore completed") return nil } @@ -460,18 +497,24 @@ func (info InitInfo) loadBackupObjectFromExternalCluster( typedClient client.Client, cluster *apiv1.Cluster, ) (*apiv1.Backup, []string, error) { + contextLogger := log.FromContext(ctx) sourceName := cluster.Spec.Bootstrap.Recovery.Source if sourceName == "" { return nil, nil, fmt.Errorf("recovery source not specified") } - log.Info("Recovering from external cluster", "sourceName", sourceName) + contextLogger.Info("Recovering from external cluster", "sourceName", sourceName) server, found := cluster.ExternalCluster(sourceName) if !found { return nil, nil, fmt.Errorf("missing external cluster: %v", sourceName) } + + if server.BarmanObjectStore == nil { + return nil, nil, fmt.Errorf("missing barman object store configuration for source: %v", sourceName) + } + serverName := server.GetServerName() env, err := barmanCredentials.EnvSetRestoreCloudCredentials( @@ -506,7 +549,7 @@ func (info InitInfo) loadBackupObjectFromExternalCluster( return nil, nil, fmt.Errorf("no target backup found") } - log.Info("Target backup found", "backup", targetBackup) + contextLogger.Info("Target backup found", "backup", targetBackup) return &apiv1.Backup{ Spec: apiv1.BackupSpec{ @@ -541,6 +584,7 @@ func (info InitInfo) loadBackupFromReference( typedClient client.Client, cluster *apiv1.Cluster, ) (*apiv1.Backup, []string, error) { + contextLogger := log.FromContext(ctx) var backup apiv1.Backup err := typedClient.Get( ctx, @@ -566,21 +610,27 @@ func (info InitInfo) loadBackupFromReference( return nil, nil, err } - log.Info("Recovering existing backup", "backup", backup) + contextLogger.Info("Recovering existing backup", "backup", backup) return &backup, env, nil } -// writeRestoreWalConfig writes a `custom.conf` allowing PostgreSQL +func (info InitInfo) writeCustomRestoreWalConfig(cluster *apiv1.Cluster, conf string) error { + recoveryFileContents := fmt.Sprintf( + "%s\n"+ + "%s", + conf, + cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions()) + + return info.writeRecoveryConfiguration(cluster, recoveryFileContents) +} + +// getRestoreWalConfig obtains the content to append to `custom.conf` allowing PostgreSQL // to complete the WAL recovery from the object storage and then start // as a new primary -func (info InitInfo) writeRestoreWalConfig( - ctx context.Context, - backup *apiv1.Backup, - cluster *apiv1.Cluster, -) error { +func getRestoreWalConfig(ctx context.Context, backup *apiv1.Backup) (string, error) { var err error - cmd := []string{barmanCapabilities.BarmanCloudWalRestore} + cmd := []string{barmanUtils.BarmanCloudWalRestore} if backup.Status.EndpointURL != "" { cmd = append(cmd, "--endpoint-url", backup.Status.EndpointURL) } @@ -590,34 +640,28 @@ func (info InitInfo) writeRestoreWalConfig( cmd, err = barmanCommand.AppendCloudProviderOptionsFromBackup( ctx, cmd, backup.Status.BarmanCredentials) if err != nil { - return err + return "", err } cmd = append(cmd, "%f", "%p") 
recoveryFileContents := fmt.Sprintf( "recovery_target_action = promote\n"+ - "restore_command = '%s'\n"+ - "%s", - strings.Join(cmd, " "), - cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions()) + "restore_command = '%s'\n", + strings.Join(cmd, " ")) - return info.writeRecoveryConfiguration(cluster, recoveryFileContents) + return recoveryFileContents, nil } func (info InitInfo) writeRecoveryConfiguration(cluster *apiv1.Cluster, recoveryFileContents string) error { // Ensure restore_command is used to correctly recover WALs // from the object storage - major, err := postgresutils.GetMajorVersion(info.PgData) - if err != nil { - return fmt.Errorf("cannot detect major version: %w", err) - } log.Info("Generated recovery configuration", "configuration", recoveryFileContents) // Temporarily suspend WAL archiving. We set it to `false` (which means failure // of the archiver) in order to defer the decision about archiving to PostgreSQL // itself once the recovery job is completed and the instance is regularly started. - err = fileutils.AppendStringToFile( + err := fileutils.AppendStringToFile( path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile), "archive_command = 'false'\n") if err != nil { @@ -667,35 +711,27 @@ func (info InitInfo) writeRecoveryConfiguration(cluster *apiv1.Cluster, recovery return fmt.Errorf("cannot write recovery config for enforced parameters: %w", err) } - if major >= 12 { - // Append restore_command to the end of the - // custom configs file - err = fileutils.AppendStringToFile( - path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile), - recoveryFileContents) - if err != nil { - return fmt.Errorf("cannot write recovery config: %w", err) - } - - err = os.WriteFile( - path.Join(info.PgData, constants.PostgresqlOverrideConfigurationFile), - []byte(""), - 0o600) - if err != nil { - return fmt.Errorf("cannot erase auto config: %w", err) - } + // Append restore_command to the end of the + // custom configs file + err = fileutils.AppendStringToFile( + path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile), + recoveryFileContents) + if err != nil { + return fmt.Errorf("cannot write recovery config: %w", err) + } - // Create recovery signal file - return os.WriteFile( - path.Join(info.PgData, "recovery.signal"), - []byte(""), - 0o600) + err = os.WriteFile( + path.Join(info.PgData, constants.PostgresqlOverrideConfigurationFile), + []byte(""), + 0o600) + if err != nil { + return fmt.Errorf("cannot erase auto config: %w", err) } - // We need to generate a recovery.conf + // Create recovery signal file return os.WriteFile( - path.Join(info.PgData, "recovery.conf"), - []byte(recoveryFileContents), + path.Join(info.PgData, "recovery.signal"), + []byte(""), 0o600) } @@ -763,7 +799,8 @@ func LoadEnforcedParametersFromCluster( // WriteInitialPostgresqlConf resets the postgresql.conf that there is in the instance using // a new bootstrapped instance as reference -func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { +func (info InitInfo) WriteInitialPostgresqlConf(ctx context.Context, cluster *apiv1.Cluster) error { + contextLogger := log.FromContext(ctx) if err := fileutils.EnsureDirectoryExists(postgresSpec.RecoveryTemporaryDirectory); err != nil { return err } @@ -775,12 +812,21 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { defer func() { err = os.RemoveAll(tempDataDir) if err != nil { - log.Error( + contextLogger.Error( err, "skipping error while deleting temporary data 
directory") } }() + enabledPluginNamesSet := stringset.From(cluster.GetJobEnabledPluginNames()) + pluginCli, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) + if err != nil { + return fmt.Errorf("error while creating the plugin client: %w", err) + } + defer pluginCli.Close(ctx) + ctx = pluginClient.SetPluginClientInContext(ctx, pluginCli) + ctx = cluster.SetInContext(ctx) + temporaryInitInfo := InitInfo{ PgData: tempDataDir, Temporary: true, @@ -790,19 +836,24 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { return fmt.Errorf("while creating a temporary data directory: %w", err) } - temporaryInstance := temporaryInitInfo.GetInstance() - temporaryInstance.Namespace = info.Namespace - temporaryInstance.ClusterName = info.ClusterName + temporaryInstance := temporaryInitInfo.GetInstance(cluster). + WithNamespace(info.Namespace). + WithClusterName(info.ClusterName) - _, err = temporaryInstance.RefreshPGHBA(cluster, "") + _, err = temporaryInstance.RefreshPGHBA(ctx, cluster, "") if err != nil { return fmt.Errorf("while generating pg_hba.conf: %w", err) } - _, err = temporaryInstance.RefreshPGIdent(cluster.Spec.PostgresConfiguration.PgIdent) + _, err = temporaryInstance.RefreshPGIdent(ctx, cluster.Spec.PostgresConfiguration.PgIdent) if err != nil { return fmt.Errorf("while generating pg_ident.conf: %w", err) } - _, err = temporaryInstance.RefreshConfigurationFilesFromCluster(cluster, false) + _, err = temporaryInstance.RefreshConfigurationFilesFromCluster( + ctx, + cluster, + false, + postgres.OperationType_TYPE_RESTORE, + ) if err != nil { return fmt.Errorf("while generating Postgres configuration: %w", err) } @@ -841,7 +892,7 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { // WriteRestoreHbaConf writes basic pg_hba.conf and pg_ident.conf allowing access without password from localhost. 
// This is needed to set the PostgreSQL password after the postgres server is started and active -func (info InitInfo) WriteRestoreHbaConf() error { +func (info InitInfo) WriteRestoreHbaConf(ctx context.Context) error { // We allow every access from localhost, and this is needed to correctly restore // the database _, err := fileutils.WriteStringToFile( @@ -852,7 +903,7 @@ func (info InitInfo) WriteRestoreHbaConf() error { } // Create only the local map referred in the HBA configuration - _, err = info.GetInstance().RefreshPGIdent(nil) + _, err = info.GetInstance(nil).RefreshPGIdent(ctx, nil) return err } @@ -863,7 +914,7 @@ func (info InitInfo) WriteRestoreHbaConf() error { func (info InitInfo) ConfigureInstanceAfterRestore(ctx context.Context, cluster *apiv1.Cluster, env []string) error { contextLogger := log.FromContext(ctx) - instance := info.GetInstance() + instance := info.GetInstance(cluster) instance.Env = env if err := instance.VerifyPgDataCoherence(ctx); err != nil { @@ -921,6 +972,7 @@ func (info *InitInfo) checkBackupDestination( client client.Client, cluster *apiv1.Cluster, ) error { + contextLogger := log.FromContext(ctx) if !cluster.Spec.Backup.IsBarmanBackupConfigured() { return nil } @@ -945,7 +997,7 @@ func (info *InitInfo) checkBackupDestination( env, postgresSpec.SpoolDirectory, info.PgData, - path.Join(info.PgData, CheckEmptyWalArchiveFile)) + path.Join(info.PgData, constants.CheckEmptyWalArchiveFile)) if err != nil { return fmt.Errorf("while creating the archiver: %w", err) } @@ -954,7 +1006,7 @@ func (info *InitInfo) checkBackupDestination( checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions( ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) if err != nil { - log.Error(err, "while getting barman-cloud-wal-archive options") + contextLogger.Error(err, "while getting barman-cloud-wal-archive options") return err } @@ -975,7 +1027,7 @@ func waitUntilRecoveryFinishes(db *sql.DB) error { } return retry.OnError(RetryUntilRecoveryDone, errorIsRetriable, func() error { - row := db.QueryRow("SELECT pg_is_in_recovery()") + row := db.QueryRow("SELECT pg_catalog.pg_is_in_recovery()") var status bool if err := row.Scan(&status); err != nil { @@ -992,3 +1044,27 @@ func waitUntilRecoveryFinishes(db *sql.DB) error { return nil }) } + +// restoreViaPlugin tries to restore the cluster using a plugin if available and enabled. +// Returns true if a restore plugin was found and any error encountered. +func restoreViaPlugin( + ctx context.Context, + cluster *apiv1.Cluster, + plugin *apiv1.PluginConfiguration, +) (*restore.RestoreResponse, error) { + contextLogger := log.FromContext(ctx) + + plugins := repository.New() + defer plugins.Close() + + pluginEnabledSet := stringset.New() + pluginEnabledSet.Put(plugin.Name) + pClient, err := pluginClient.NewClient(ctx, pluginEnabledSet) + if err != nil { + contextLogger.Error(err, "Error while loading required plugins") + return nil, err + } + defer pClient.Close(ctx) + + return pClient.Restore(ctx, cluster) +} diff --git a/pkg/management/postgres/restore_test.go b/pkg/management/postgres/restore_test.go index bd59455472..811b1a8b8b 100644 --- a/pkg/management/postgres/restore_test.go +++ b/pkg/management/postgres/restore_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
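Among the hunks above, waitUntilRecoveryFinishes keeps polling pg_is_in_recovery() through retry.OnError until the instance leaves recovery. The same polling shape without the client-go helper, as a hedged sketch with a placeholder DSN:

```go
package main

import (
	"database/sql"
	"errors"
	"fmt"
	"time"

	_ "github.com/jackc/pgx/v5/stdlib"
)

var errStillInRecovery = errors.New("instance is still in recovery")

// waitOutOfRecovery polls pg_is_in_recovery() until it turns false or the
// attempts run out; a plain loop standing in for retry.OnError + backoff.
func waitOutOfRecovery(db *sql.DB, attempts int, pause time.Duration) error {
	for i := 0; i < attempts; i++ {
		var inRecovery bool
		if err := db.QueryRow("SELECT pg_catalog.pg_is_in_recovery()").Scan(&inRecovery); err != nil {
			return err
		}
		if !inRecovery {
			return nil
		}
		time.Sleep(pause)
	}
	return errStillInRecovery
}

func main() {
	db, err := sql.Open("pgx", "host=/tmp dbname=postgres") // placeholder DSN
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println(waitOutOfRecovery(db, 10, time.Second))
}
```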
@@ -12,12 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres import ( - "context" "os" "path" @@ -44,13 +46,13 @@ var _ = Describe("testing restore InitInfo methods", func() { _ = fileutils.RemoveFile(tempDir) }) - It("should correctly restore a custom PgWal folder without data", func() { + It("should correctly restore a custom PgWal folder without data", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, } - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeTrue()) @@ -59,7 +61,7 @@ var _ = Describe("testing restore InitInfo methods", func() { Expect(exists).To(BeTrue()) }) - It("should correctly migrate an existing wal folder to the new one", func() { + It("should correctly migrate an existing wal folder to the new one", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, @@ -93,7 +95,7 @@ var _ = Describe("testing restore InitInfo methods", func() { }) By("executing the restore custom wal dir function", func() { - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeTrue()) }) @@ -120,7 +122,7 @@ var _ = Describe("testing restore InitInfo methods", func() { }) }) - It("should not do any changes if the symlink is already present", func() { + It("should not do any changes if the symlink is already present", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, @@ -135,16 +137,16 @@ var _ = Describe("testing restore InitInfo methods", func() { err = os.Symlink(newPgWal, pgWal) Expect(err).ToNot(HaveOccurred()) - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeFalse()) }) - It("should not do any changes if pgWal is not set", func() { + It("should not do any changes if pgWal is not set", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, } - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeFalse()) }) diff --git a/pkg/management/postgres/suite_test.go b/pkg/management/postgres/suite_test.go index 46c0674161..b602dcf53f 100644 --- a/pkg/management/postgres/suite_test.go +++ b/pkg/management/postgres/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,13 +13,18 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres import ( + "database/sql" "testing" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -27,3 +33,37 @@ func TestPostgres(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "PostgreSQL instance manager test suite") } + +// mockInstance implements the minimum required functionality to test ConfigureNewInstance +type mockInstance struct { + superUserDB *sql.DB + templateDB *sql.DB + appDB *sql.DB +} + +type fakePooler struct { + db *sql.DB +} + +func (f fakePooler) Connection(_ string) (*sql.DB, error) { + return f.db, nil +} + +func (f fakePooler) GetDsn(dbName string) string { + return dbName +} + +func (f fakePooler) ShutdownConnections() { +} + +func (m *mockInstance) GetSuperUserDB() (*sql.DB, error) { + return m.superUserDB, nil +} + +func (m *mockInstance) GetTemplateDB() (*sql.DB, error) { + return m.templateDB, nil +} + +func (m *mockInstance) ConnectionPool() pool.Pooler { + return &fakePooler{db: m.appDB} +} diff --git a/pkg/management/postgres/utils/doc.go b/pkg/management/postgres/utils/doc.go index cb665b51a5..d052651a77 100644 --- a/pkg/management/postgres/utils/doc.go +++ b/pkg/management/postgres/utils/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package utils holds generic utils about postgres instances diff --git a/pkg/management/postgres/utils/roles.go b/pkg/management/postgres/utils/roles.go index 11dca63299..29d65a275c 100644 --- a/pkg/management/postgres/utils/roles.go +++ b/pkg/management/postgres/utils/roles.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/utils/roles_test.go b/pkg/management/postgres/utils/roles_test.go index 4355f4c900..b698d11e5e 100644 --- a/pkg/management/postgres/utils/roles_test.go +++ b/pkg/management/postgres/utils/roles_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/utils/suite_test.go b/pkg/management/postgres/utils/suite_test.go index 0eb35b04a5..a2378b1405 100644 --- a/pkg/management/postgres/utils/suite_test.go +++ b/pkg/management/postgres/utils/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/utils/utils.go b/pkg/management/postgres/utils/utils.go index 9b552050cb..5093f10900 100644 --- a/pkg/management/postgres/utils/utils.go +++ b/pkg/management/postgres/utils/utils.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -132,7 +135,7 @@ func DBToString(t interface{}) (string, bool) { // GetAllAccessibleDatabases returns the list of all the accessible databases using the superuser func GetAllAccessibleDatabases(tx *sql.Tx, whereClause string) (databases []string, errors []error) { rows, err := tx.Query(strings.Join( - []string{"SELECT datname FROM pg_database", whereClause}, + []string{"SELECT datname FROM pg_catalog.pg_database", whereClause}, " WHERE "), ) if err != nil { diff --git a/pkg/management/postgres/utils/version.go b/pkg/management/postgres/utils/version.go index 23f99846e0..f7da31f517 100644 --- a/pkg/management/postgres/utils/version.go +++ b/pkg/management/postgres/utils/version.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
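A change that recurs throughout these hunks, including the GetAllAccessibleDatabases query above, is schema-qualifying every catalog reference (pg_database becomes pg_catalog.pg_database, pg_is_in_recovery() becomes pg_catalog.pg_is_in_recovery()). Plausibly this hardens the instance manager's superuser queries against search_path shadowing, where a user-created relation bearing a catalog name could be resolved first; the pg_catalog prefix pins name resolution. For example:

```go
package main

import "fmt"

// Unqualified names resolve through search_path, so a user-created
// relation called pg_database earlier on the path could shadow the real
// catalog; the pg_catalog prefix removes that ambiguity.
func accessibleDatabasesQuery(whereClause string) string {
	return fmt.Sprintf(
		"SELECT datname FROM pg_catalog.pg_database WHERE %s", whereClause)
}

func main() {
	fmt.Println(accessibleDatabasesQuery("datallowconn"))
}
```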
+ +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -38,21 +41,21 @@ func GetPgVersion(db *sql.DB) (*semver.Version, error) { } func parseVersionNum(versionNum string) (*semver.Version, error) { - versionInt, err := strconv.Atoi(versionNum) + versionInt, err := strconv.ParseUint(versionNum, 10, 64) if err != nil { return nil, err } return &semver.Version{ - Major: uint64(versionInt / 10000), //nolint:gosec - Minor: uint64((versionInt / 100) % 100), //nolint:gosec - Patch: uint64(versionInt % 100), //nolint:gosec + Major: versionInt / 10000, + Minor: (versionInt / 100) % 100, + Patch: versionInt % 100, }, nil } -// GetMajorVersion read the PG_VERSION file in the data directory +// GetMajorVersionFromPgData reads the PG_VERSION file in the data directory // returning the major version of the database -func GetMajorVersion(pgData string) (int, error) { +func GetMajorVersionFromPgData(pgData string) (int, error) { content, err := os.ReadFile(path.Join(pgData, "PG_VERSION")) // #nosec if err != nil { return 0, err } diff --git a/pkg/management/postgres/utils/version_test.go b/pkg/management/postgres/utils/version_test.go index 52631e4ceb..69188ad9bc 100644 --- a/pkg/management/postgres/utils/version_test.go +++ b/pkg/management/postgres/utils/version_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/wal.go b/pkg/management/postgres/wal.go index 3338175597..0a3d486b8d 100644 --- a/pkg/management/postgres/wal.go +++ b/pkg/management/postgres/wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -34,7 +37,7 @@ var errNoWalArchivePresent = errors.New("no wal-archive present") // On primary, it could run even before the first WAL has completed. For this reason it // could require a WAL switch, to quicken the check. // On standby, the mere existence of the standby guarantees that a WAL file has already been generated -// by the pg_basebakup used to prime the standby data directory, so we check only if the WAL +// by the pg_basebackup used to prime the standby data directory, so we check only if the WAL // archive process is not failing.
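The version.go hunk above moves parseVersionNum to strconv.ParseUint, which makes the nolint:gosec annotations on the int-to-uint64 conversions unnecessary. The decimal layout it decodes splits a server_version_num-style value as major = n/10000, minor = (n/100)%100, patch = n%100. A worked example of the arithmetic:

```go
package main

import "fmt"

// decode splits a version number the same way the hunk does:
// 170004 / 10000 = 17, (170004 / 100) % 100 = 0, 170004 % 100 = 4.
func decode(versionNum uint64) (major, minor, patch uint64) {
	return versionNum / 10000, (versionNum / 100) % 100, versionNum % 100
}

func main() {
	fmt.Println(decode(170004)) // 17 0 4
	fmt.Println(decode(130011)) // 13 0 11
}
```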
func ensureWalArchiveIsWorking(instance *Instance) error { isPrimary, err := instance.IsPrimary() @@ -43,7 +46,7 @@ func ensureWalArchiveIsWorking(instance *Instance) error { } if isPrimary { - return newWalArchiveBootstrapperForPrimary().ensureFirstWalArchived(retryUntilWalArchiveWorking) + return newWalArchiveBootstrapperForPrimary().ensureFirstWalArchived(instance, retryUntilWalArchiveWorking) } return newWalArchiveAnalyzerForReplicaInstance(instance.GetPrimaryConnInfo()). @@ -62,7 +65,7 @@ func newWalArchiveAnalyzerForReplicaInstance(primaryConnInfo string) *walArchive dbFactory: func() (*sql.DB, error) { db, openErr := sql.Open( "pgx", - fmt.Sprintf("%s dbname=%s", primaryConnInfo, "postgres"), + primaryConnInfo, ) if openErr != nil { log.Error(openErr, "can not open postgres database") @@ -91,7 +94,7 @@ func (w *walArchiveAnalyzer) mustHaveFirstWalArchivedWithBackoff(backoff wait.Ba func (w *walArchiveAnalyzer) mustHaveFirstWalArchived(db *sql.DB) error { row := db.QueryRow("SELECT COALESCE(last_archived_time,'-infinity') > " + "COALESCE(last_failed_time, '-infinity') AS is_archiving, last_failed_time IS NOT NULL " + - "FROM pg_stat_archiver") + "FROM pg_catalog.pg_stat_archiver") var walArchivingWorking, lastFailedTimePresent bool @@ -143,8 +146,18 @@ func newWalArchiveBootstrapperForPrimary() *walArchiveBootstrapper { } } -func (w *walArchiveBootstrapper) ensureFirstWalArchived(backoff wait.Backoff) error { - return retry.OnError(backoff, resources.RetryAlways, func() error { +var errPrimaryDemoted = errors.New("primary was demoted while waiting for the first wal-archive") + +func (w *walArchiveBootstrapper) ensureFirstWalArchived(instance *Instance, backoff wait.Backoff) error { + return retry.OnError(backoff, func(err error) bool { return !errors.Is(err, errPrimaryDemoted) }, func() error { + isPrimary, err := instance.IsPrimary() + if err != nil { + return fmt.Errorf("error checking primary: %w", err) + } + if !isPrimary { + return errPrimaryDemoted + } + db, err := w.dbFactory() if err != nil { return err @@ -183,7 +196,7 @@ func (w *walArchiveBootstrapper) shipWalFile(db *sql.DB) error { return fmt.Errorf("error while requiring a checkpoint: %w", err) } - if _, err := db.Exec("SELECT pg_switch_wal()"); err != nil { + if _, err := db.Exec("SELECT pg_catalog.pg_switch_wal()"); err != nil { return fmt.Errorf("error while switching to a new WAL: %w", err) } diff --git a/pkg/management/postgres/wal_test.go b/pkg/management/postgres/wal_test.go index d1dc94a930..fd4e1b2970 100644 --- a/pkg/management/postgres/wal_test.go +++ b/pkg/management/postgres/wal_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
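The wal.go hunk above makes the first-WAL-archive wait abort cleanly on demotion: every retry re-checks IsPrimary and returns the errPrimaryDemoted sentinel, and the retry predicate stops retrying exactly on that error. A minimal sketch of a retry loop with a non-retriable sentinel, using a plain loop in place of client-go's retry.OnError:

```go
package main

import (
	"errors"
	"fmt"
)

var errDemoted = errors.New("primary was demoted while waiting")

// retryUnlessDemoted keeps calling fn until it succeeds or returns the
// non-retriable sentinel, mirroring retry.OnError's retriable predicate.
func retryUnlessDemoted(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil || errors.Is(err, errDemoted) {
			return err
		}
	}
	return err
}

func main() {
	calls := 0
	err := retryUnlessDemoted(5, func() error {
		calls++
		if calls == 3 {
			return errDemoted // e.g. IsPrimary() turned false mid-wait
		}
		return errors.New("wal not archived yet")
	})
	fmt.Println(calls, err) // 3 primary was demoted while waiting
}
```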
+ +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -27,7 +30,7 @@ import ( ) var _ = Describe("ensure isWalArchiveWorking works correctly", func() { - const flexibleCoalescenceQuery = "SELECT COALESCE.*FROM pg_stat_archiver" + const flexibleCoalescenceQuery = "SELECT COALESCE.*FROM pg_catalog.pg_stat_archiver" var ( db *sql.DB mock sqlmock.Sqlmock @@ -75,7 +78,7 @@ var _ = Describe("ensure isWalArchiveWorking works correctly", func() { rows := sqlmock.NewRows([]string{"is_archiving", "last_failed_time_present"}).AddRow(false, false) mock.ExpectQuery(flexibleCoalescenceQuery).WillReturnRows(rows) mock.ExpectExec("CHECKPOINT").WillReturnResult(fakeResult) - mock.ExpectExec("SELECT pg_switch_wal()").WillReturnResult(fakeResult) + mock.ExpectExec("SELECT pg_catalog.pg_switch_wal()").WillReturnResult(fakeResult) // Call the function err := bootstrapper.mustHaveFirstWalArchived(db) diff --git a/pkg/management/postgres/webserver/backup_client.go b/pkg/management/postgres/webserver/backup_client.go deleted file mode 100644 index 549415d82c..0000000000 --- a/pkg/management/postgres/webserver/backup_client.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webserver - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - "github.com/cloudnative-pg/machinery/pkg/log" - corev1 "k8s.io/api/core/v1" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" -) - -// backupClient a client to interact with the instance backup endpoints -type backupClient struct { - cli *http.Client -} - -// BackupClient is a struct capable of interacting with the instance backup endpoints -type BackupClient interface { - StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*Response[BackupResultData], error) - Start(ctx context.Context, pod *corev1.Pod, sbq StartBackupRequest) error - Stop(ctx context.Context, pod *corev1.Pod, sbq StopBackupRequest) error -} - -// NewBackupClient creates a client capable of interacting with the instance backup endpoints -func NewBackupClient() BackupClient { - const connectionTimeout = 2 * time.Second - const requestTimeout = 30 * time.Second - - return &backupClient{cli: resources.NewHTTPClient(connectionTimeout, requestTimeout)} -} - -// StatusWithErrors retrieves the current status of the backup. 
-// Returns the response body in case there is an error in the request -func (c *backupClient) StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*Response[BackupResultData], error) { - scheme := instance.GetStatusSchemeFromPod(pod) - httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) - req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil) - if err != nil { - return nil, err - } - - return executeRequestWithError[BackupResultData](ctx, c.cli, req, true) -} - -// Start runs the pg_start_backup -func (c *backupClient) Start(ctx context.Context, pod *corev1.Pod, sbq StartBackupRequest) error { - scheme := instance.GetStatusSchemeFromPod(pod) - httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) - - // Marshalling the payload to JSON - jsonBody, err := json.Marshal(sbq) - if err != nil { - return fmt.Errorf("failed to marshal start payload: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, "POST", httpURL, bytes.NewReader(jsonBody)) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - - _, err = executeRequestWithError[struct{}](ctx, c.cli, req, false) - return err -} - -// Stop runs the command pg_stop_backup -func (c *backupClient) Stop(ctx context.Context, pod *corev1.Pod, sbq StopBackupRequest) error { - scheme := instance.GetStatusSchemeFromPod(pod) - httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) - // Marshalling the payload to JSON - jsonBody, err := json.Marshal(sbq) - if err != nil { - return fmt.Errorf("failed to marshal stop payload: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, "PUT", httpURL, bytes.NewReader(jsonBody)) - if err != nil { - return err - } - _, err = executeRequestWithError[BackupResultData](ctx, c.cli, req, false) - return err -} - -func executeRequestWithError[T any]( - ctx context.Context, - cli *http.Client, - req *http.Request, - ignoreBodyErrors bool, -) (*Response[T], error) { - contextLogger := log.FromContext(ctx) - - resp, err := cli.Do(req) - if err != nil { - return nil, fmt.Errorf("while executing http request: %w", err) - } - - defer func() { - if err := resp.Body.Close(); err != nil { - contextLogger.Error(err, "while closing response body") - } - }() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("while reading the response body: %w", err) - } - - if resp.StatusCode == http.StatusInternalServerError { - return nil, fmt.Errorf("encountered an internal server error status code 500 with body: %s", string(body)) - } - - var result Response[T] - if err := json.Unmarshal(body, &result); err != nil { - return nil, fmt.Errorf("while unmarshalling the body, body: %s err: %w", string(body), err) - } - if result.Error != nil && !ignoreBodyErrors { - return nil, fmt.Errorf("body contained an error code: %s and message: %s", - result.Error.Code, result.Error.Message) - } - - return &result, nil -} diff --git a/pkg/management/postgres/webserver/backup_connection.go b/pkg/management/postgres/webserver/backup_connection.go index aae2fb2c72..53cb6beb4b 100644 --- a/pkg/management/postgres/webserver/backup_connection.go +++ b/pkg/management/postgres/webserver/backup_connection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package webserver @@ -57,7 +60,6 @@ const ( var replicationSlotInvalidCharacters = regexp.MustCompile(`[^a-z0-9_]`) type backupConnection struct { - sync sync.Mutex immediateCheckpoint bool waitForArchive bool conn *sql.Conn @@ -66,37 +68,6 @@ type backupConnection struct { err error } -func (bc *backupConnection) setPhase(phase BackupConnectionPhase, backupName string) { - bc.sync.Lock() - defer bc.sync.Unlock() - if backupName != bc.data.BackupName { - return - } - bc.data.Phase = phase -} - -func (bc *backupConnection) closeConnection(backupName string) error { - bc.sync.Lock() - defer bc.sync.Unlock() - if backupName != bc.data.BackupName { - return nil - } - - return bc.conn.Close() -} - -func (bc *backupConnection) executeWithLock(backupName string, cb func() error) { - bc.sync.Lock() - defer bc.sync.Unlock() - if backupName != bc.data.BackupName { - return - } - - if err := cb(); err != nil { - bc.err = err - } -} - func newBackupConnection( ctx context.Context, instance *postgres.Instance, @@ -132,8 +103,10 @@ func newBackupConnection( }, nil } -func (bc *backupConnection) startBackup(ctx context.Context, backupName string) { +func (bc *backupConnection) startBackup(ctx context.Context, sync *sync.Mutex) { contextLogger := log.FromContext(ctx).WithValues("step", "start") + sync.Lock() + defer sync.Unlock() if bc == nil { return @@ -145,7 +118,7 @@ func (bc *backupConnection) startBackup(ctx context.Context, backupName string) } contextLogger.Error(bc.err, "encountered error while starting backup") - if err := bc.closeConnection(backupName); err != nil { + if err := bc.conn.Close(); err != nil { if !errors.Is(err, sql.ErrConnDone) { contextLogger.Error(err, "while closing backup connection") } @@ -156,41 +129,42 @@ func (bc *backupConnection) startBackup(ctx context.Context, backupName string) slotName := replicationSlotInvalidCharacters.ReplaceAllString(bc.data.BackupName, "_") if _, err := bc.conn.ExecContext( ctx, - "SELECT pg_create_physical_replication_slot(slot_name => $1, immediately_reserve => true, temporary => true)", + "SELECT pg_catalog.pg_create_physical_replication_slot("+ + "slot_name => $1, immediately_reserve => true, temporary => true)", slotName, ); err != nil { - bc.err = fmt.Errorf("while creating the replication slot: %w", bc.err) + bc.err = fmt.Errorf("while creating the replication slot: %w", err) return } var row *sql.Row if bc.postgresMajorVersion < 15 { - row = bc.conn.QueryRowContext(ctx, "SELECT pg_start_backup($1, $2, false);", bc.data.BackupName, + row = bc.conn.QueryRowContext(ctx, "SELECT pg_catalog.pg_start_backup($1, $2, false);", bc.data.BackupName, bc.immediateCheckpoint) } else { - row = bc.conn.QueryRowContext(ctx, "SELECT pg_backup_start(label => $1, fast => $2);", bc.data.BackupName, + row = bc.conn.QueryRowContext(ctx, "SELECT pg_catalog.pg_backup_start(label => $1, fast => $2);", bc.data.BackupName, bc.immediateCheckpoint) } - bc.executeWithLock(backupName, func() error { - if err := row.Scan(&bc.data.BeginLSN); err != nil { - return fmt.Errorf("while scanning backup start: %w", err) - } - bc.data.Phase = Started + if err 
:= row.Scan(&bc.data.BeginLSN); err != nil { + bc.err = fmt.Errorf("while scanning backup start: %w", err) + return + } - return nil - }) + bc.data.Phase = Started } -func (bc *backupConnection) stopBackup(ctx context.Context, backupName string) { +func (bc *backupConnection) stopBackup(ctx context.Context, sync *sync.Mutex) { contextLogger := log.FromContext(ctx).WithValues("step", "stop") + sync.Lock() + defer sync.Unlock() if bc == nil { return } defer func() { - if err := bc.closeConnection(backupName); err != nil { + if err := bc.conn.Close(); err != nil { if !errors.Is(err, sql.ErrConnDone) { contextLogger.Error(err, "while closing backup connection") } @@ -204,18 +178,17 @@ func (bc *backupConnection) stopBackup(ctx context.Context, backupName string) { var row *sql.Row if bc.postgresMajorVersion < 15 { row = bc.conn.QueryRowContext(ctx, - "SELECT lsn, labelfile, spcmapfile FROM pg_stop_backup(false, $1);", bc.waitForArchive) + "SELECT lsn, labelfile, spcmapfile FROM pg_catalog.pg_stop_backup(false, $1);", bc.waitForArchive) } else { row = bc.conn.QueryRowContext(ctx, - "SELECT lsn, labelfile, spcmapfile FROM pg_backup_stop(wait_for_archive => $1);", bc.waitForArchive) + "SELECT lsn, labelfile, spcmapfile FROM pg_catalog.pg_backup_stop(wait_for_archive => $1);", bc.waitForArchive) } - bc.executeWithLock(backupName, func() error { - if err := row.Scan(&bc.data.EndLSN, &bc.data.LabelFile, &bc.data.SpcmapFile); err != nil { - contextLogger.Error(err, "while stopping PostgreSQL physical backup") - return fmt.Errorf("while scanning backup stop: %w", err) - } - bc.data.Phase = Completed - return nil - }) + if err := row.Scan(&bc.data.EndLSN, &bc.data.LabelFile, &bc.data.SpcmapFile); err != nil { + contextLogger.Error(err, "while stopping PostgreSQL physical backup") + bc.err = fmt.Errorf("while scanning backup stop: %w", err) + return + } + + bc.data.Phase = Completed } diff --git a/pkg/resources/client.go b/pkg/management/postgres/webserver/client/common/client.go similarity index 89% rename from pkg/resources/client.go rename to pkg/management/postgres/webserver/client/common/client.go index f66cb51446..1a59d735af 100644 --- a/pkg/resources/client.go +++ b/pkg/management/postgres/webserver/client/common/client.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package resources +package common import ( "context" diff --git a/pkg/management/postgres/webserver/client/common/doc.go b/pkg/management/postgres/webserver/client/common/doc.go new file mode 100644 index 0000000000..1dda96faa4 --- /dev/null +++ b/pkg/management/postgres/webserver/client/common/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
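The backup_connection refactor above drops the per-struct helpers (setPhase, closeConnection, executeWithLock) in favour of a single mutex that the caller passes in and that startBackup/stopBackup hold for their whole run. A minimal sketch of that caller-held-lock shape, with hypothetical names:

    package main

    import (
        "fmt"
        "sync"
    )

    type backup struct {
        phase string
    }

    // start holds a caller-owned mutex for the whole multi-step operation, so
    // every state update in the sequence sits in one critical section instead
    // of being locked field by field.
    func (b *backup) start(mu *sync.Mutex) {
        mu.Lock()
        defer mu.Unlock()

        b.phase = "started"
    }

    func main() {
        var mu sync.Mutex
        b := &backup{}
        b.start(&mu)
        fmt.Println(b.phase) // started
    }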
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package common provides common utilities for the webserver client. +package common diff --git a/internal/management/cache/client/client.go b/pkg/management/postgres/webserver/client/local/cache.go similarity index 71% rename from internal/management/cache/client/client.go rename to pkg/management/postgres/webserver/client/local/cache.go index 4c3486c579..03cb8c70cb 100644 --- a/internal/management/cache/client/client.go +++ b/pkg/management/postgres/webserver/client/local/cache.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -// Package client contains the constants and functions for reading supported objects from cache -// or building them in case of cache miss. -package client +package local import ( "encoding/json" @@ -32,9 +33,19 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" ) +// CacheClient is the interface to interact with the cache endpoints +type CacheClient interface { + GetCluster() (*apiv1.Cluster, error) + GetEnv(key string) ([]string, error) +} + +type cacheClientImpl struct { + cli *http.Client +} + // GetCluster gets the required cluster from cache -func GetCluster() (*apiv1.Cluster, error) { - bytes, err := httpCacheGet(cache.ClusterKey) +func (c *cacheClientImpl) GetCluster() (*apiv1.Cluster, error) { + bytes, err := c.httpCacheGet(cache.ClusterKey) if err != nil { return nil, err } @@ -49,8 +60,8 @@ func GetCluster() (*apiv1.Cluster, error) { } // GetEnv gets the environment variables from cache -func GetEnv(key string) ([]string, error) { - bytes, err := httpCacheGet(key) +func (c *cacheClientImpl) GetEnv(key string) ([]string, error) { + bytes, err := c.httpCacheGet(key) if err != nil { return nil, err } @@ -66,11 +77,11 @@ func GetEnv(key string) ([]string, error) { // httpCacheGet retrieves an object from the cache. 
// In case of failures it retries for a while before giving up -func httpCacheGet(urlPath string) ([]byte, error) { +func (c *cacheClientImpl) httpCacheGet(urlPath string) ([]byte, error) { var bytes []byte err := retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error { var err error - bytes, err = get(urlPath) + bytes, err = c.get(urlPath) return err }) if err != nil { @@ -80,8 +91,8 @@ func httpCacheGet(urlPath string) ([]byte, error) { return bytes, nil } -func get(urlPath string) ([]byte, error) { - resp, err := http.Get(url.Local(url.PathCache+urlPath, url.LocalPort)) +func (c *cacheClientImpl) get(urlPath string) ([]byte, error) { + resp, err := c.cli.Get(url.Local(url.PathCache+urlPath, url.LocalPort)) if err != nil { return nil, err } diff --git a/pkg/management/postgres/webserver/client/local/cluster.go b/pkg/management/postgres/webserver/client/local/cluster.go new file mode 100644 index 0000000000..f04b253bd8 --- /dev/null +++ b/pkg/management/postgres/webserver/client/local/cluster.go @@ -0,0 +1,74 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package local + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + + "github.com/cloudnative-pg/machinery/pkg/log" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" +) + +// ClusterClient is the interface to interact with the uncategorized endpoints +type ClusterClient interface { + // SetWALArchiveStatusCondition sets the wal-archive status condition. + // An empty errMessage means that the archive process was successful. + // Returns any error encountered during the request. 
+	SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error
+}
+
+// clusterClientImpl is a client to interact with the uncategorized endpoints
+type clusterClientImpl struct {
+	cli *http.Client
+}
+
+func (c *clusterClientImpl) SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error {
+	contextLogger := log.FromContext(ctx).WithValues("endpoint", url.PathWALArchiveStatusCondition)
+
+	asr := webserver.ArchiveStatusRequest{
+		Error: errMessage,
+	}
+
+	encoded, err := json.Marshal(&asr)
+	if err != nil {
+		return err
+	}
+
+	resp, err := c.cli.Post(
+		url.Local(url.PathWALArchiveStatusCondition, url.LocalPort),
+		"application/json",
+		bytes.NewBuffer(encoded),
+	)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if errClose := resp.Body.Close(); errClose != nil {
+			contextLogger.Error(errClose, "while closing response body")
+		}
+	}()
+
+	return nil
+}
diff --git a/pkg/management/postgres/webserver/client/local/doc.go b/pkg/management/postgres/webserver/client/local/doc.go
new file mode 100644
index 0000000000..25f2e67749
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package local provides a client to interact with the local webserver endpoints.
+package local
diff --git a/pkg/management/postgres/webserver/client/local/local.go b/pkg/management/postgres/webserver/client/local/local.go
new file mode 100644
index 0000000000..9a61e2e34d
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/local.go
@@ -0,0 +1,58 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
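SetWALArchiveStatusCondition above posts an ArchiveStatusRequest to the local webserver, where an empty error field marks archiving as healthy. A small sketch of the JSON payloads involved (struct shape copied from the diff, values illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // archiveStatusRequest mirrors webserver.ArchiveStatusRequest from the diff:
    // an empty Error field reports the last wal-archive run as successful.
    type archiveStatusRequest struct {
        Error string `json:"error,omitempty"`
    }

    func main() {
        ok, _ := json.Marshal(archiveStatusRequest{})
        failed, _ := json.Marshal(archiveStatusRequest{Error: "wal-archive failed"})
        fmt.Println(string(ok))     // {}
        fmt.Println(string(failed)) // {"error":"wal-archive failed"}
    }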
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package local
+
+import (
+	"time"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common"
+)
+
+// Client is an entity capable of interacting with the local webserver endpoints
+type Client interface {
+	Cache() CacheClient
+	Cluster() ClusterClient
+}
+
+type localClient struct {
+	cache   CacheClient
+	cluster ClusterClient
+}
+
+// NewClient returns a new instance of Client
+func NewClient() Client {
+	const connectionTimeout = 2 * time.Second
+	const requestTimeout = 30 * time.Second
+
+	standardClient := common.NewHTTPClient(connectionTimeout, requestTimeout)
+
+	return &localClient{
+		cache:   &cacheClientImpl{cli: standardClient},
+		cluster: &clusterClientImpl{cli: standardClient},
+	}
+}
+
+func (c *localClient) Cache() CacheClient {
+	return c.cache
+}
+
+func (c *localClient) Cluster() ClusterClient {
+	return c.cluster
+}
diff --git a/pkg/management/postgres/webserver/client/remote/backup.go b/pkg/management/postgres/webserver/client/remote/backup.go
new file mode 100644
index 0000000000..d023d228ac
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/remote/backup.go
@@ -0,0 +1,114 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package remote
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	corev1 "k8s.io/api/core/v1"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
+)
+
+// BackupClient is the interface to interact with the backup endpoints
+type BackupClient interface {
+	StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*webserver.Response[webserver.BackupResultData], error)
+	Start(
+		ctx context.Context,
+		pod *corev1.Pod,
+		sbq webserver.StartBackupRequest,
+	) (*webserver.Response[webserver.BackupResultData], error)
+	Stop(
+		ctx context.Context,
+		pod *corev1.Pod,
+		sbq webserver.StopBackupRequest,
+	) (*webserver.Response[webserver.BackupResultData], error)
+}
+
+// backupClientImpl is a client to interact with the instance backup endpoints
+type backupClientImpl struct {
+	cli *http.Client
+}
+
+// StatusWithErrors retrieves the current status of the backup.
+// Returns the response body in case there is an error in the request +func (c *backupClientImpl) StatusWithErrors( + ctx context.Context, + pod *corev1.Pod, +) (*webserver.Response[webserver.BackupResultData], error) { + scheme := GetStatusSchemeFromPod(pod) + httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, httpURL, nil) + if err != nil { + return nil, err + } + + return executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, true) +} + +// Start runs the pg_start_backup +func (c *backupClientImpl) Start( + ctx context.Context, + pod *corev1.Pod, + sbq webserver.StartBackupRequest, +) (*webserver.Response[webserver.BackupResultData], error) { + scheme := GetStatusSchemeFromPod(pod) + httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) + + // Marshalling the payload to JSON + jsonBody, err := json.Marshal(sbq) + if err != nil { + return nil, fmt.Errorf("failed to marshal start payload: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, httpURL, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + return executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, true) +} + +// Stop runs the command pg_stop_backup +func (c *backupClientImpl) Stop( + ctx context.Context, + pod *corev1.Pod, + sbq webserver.StopBackupRequest, +) (*webserver.Response[webserver.BackupResultData], error) { + scheme := GetStatusSchemeFromPod(pod) + httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) + // Marshalling the payload to JSON + jsonBody, err := json.Marshal(sbq) + if err != nil { + return nil, fmt.Errorf("failed to marshal stop payload: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPut, httpURL, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + return executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, true) +} diff --git a/pkg/management/postgres/webserver/client/remote/doc.go b/pkg/management/postgres/webserver/client/remote/doc.go new file mode 100644 index 0000000000..a49fd7381b --- /dev/null +++ b/pkg/management/postgres/webserver/client/remote/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package remote contains the client capable of querying the webserver remote endpoint. 
+package remote
diff --git a/pkg/resources/instance/client.go b/pkg/management/postgres/webserver/client/remote/instance.go
similarity index 85%
rename from pkg/resources/instance/client.go
rename to pkg/management/postgres/webserver/client/remote/instance.go
index 1c80752e69..40b238fbe9 100644
--- a/pkg/resources/instance/client.go
+++ b/pkg/management/postgres/webserver/client/remote/instance.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package instance
+package remote
 
 import (
 	"context"
@@ -34,11 +37,12 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/util/retry"
 
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
-	"github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+	contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context"
 )
 
 const (
@@ -55,8 +59,8 @@ var requestRetry = wait.Backoff{
 	Jitter:   0.1,
 }
 
-// Client a http client capable of querying the instance HTTP endpoints
-type Client interface {
+// InstanceClient is an HTTP client capable of querying the instance HTTP endpoints
+type InstanceClient interface {
 	// GetStatusFromInstances gets the replication status from the PostgreSQL instances,
 	// the returned list is sorted in order to have the primary as the first element
 	// and the other instances in their election order
@@ -83,7 +87,7 @@ type Client interface {
 	ArchivePartialWAL(context.Context, *corev1.Pod) (string, error)
 }
 
-type statusClient struct {
+type instanceClientImpl struct {
 	*http.Client
 }
 
@@ -97,23 +101,21 @@ func (i StatusError) Error() string {
 	return fmt.Sprintf("error status code: %v, body: %v", i.StatusCode, i.Body)
 }
 
-// NewStatusClient returns a client capable of querying the instance HTTP endpoints
-func NewStatusClient() Client {
-	const connectionTimeout = 2 * time.Second
-	const requestTimeout = 10 * time.Second
-
-	return &statusClient{Client: resources.NewHTTPClient(connectionTimeout, requestTimeout)}
-}
-
 // extractInstancesStatus extracts the status of the underlying PostgreSQL instance from
 // the requested Pod, via the instance manager.
In case of failure, errors are passed // in the result list -func (r statusClient) extractInstancesStatus( +func (r instanceClientImpl) extractInstancesStatus( ctx context.Context, activePods []corev1.Pod, ) postgres.PostgresqlStatusList { var result postgres.PostgresqlStatusList + cluster, ok := ctx.Value(contextutils.ContextKeyCluster).(*apiv1.Cluster) + if ok && cluster != nil { + result.IsReplicaCluster = cluster.IsReplica() + result.CurrentPrimary = cluster.Status.CurrentPrimary + } + for idx := range activePods { instanceStatus := r.getReplicaStatusFromPodViaHTTP(ctx, activePods[idx]) result.Items = append(result.Items, instanceStatus) @@ -123,7 +125,7 @@ func (r statusClient) extractInstancesStatus( // getReplicaStatusFromPodViaHTTP retrieves the status of PostgreSQL pod via HTTP, retrying // the request if some communication error is encountered -func (r *statusClient) getReplicaStatusFromPodViaHTTP( +func (r *instanceClientImpl) getReplicaStatusFromPodViaHTTP( ctx context.Context, pod corev1.Pod, ) (result postgres.PostgresqlStatus) { @@ -161,7 +163,7 @@ func (r *statusClient) getReplicaStatusFromPodViaHTTP( return result } -func (r *statusClient) GetStatusFromInstances( +func (r *instanceClientImpl) GetStatusFromInstances( ctx context.Context, pods corev1.PodList, ) postgres.PostgresqlStatusList { @@ -177,14 +179,14 @@ func (r *statusClient) GetStatusFromInstances( for idx := range status.Items { if status.Items[idx].Error != nil { log.FromContext(ctx).Info("Cannot extract Pod status", - "name", status.Items[idx].Pod.Name, + "podName", status.Items[idx].Pod.Name, "error", status.Items[idx].Error.Error()) } } return status } -func (r *statusClient) GetPgControlDataFromInstance( +func (r *instanceClientImpl) GetPgControlDataFromInstance( ctx context.Context, pod *corev1.Pod, ) (string, error) { @@ -192,12 +194,12 @@ func (r *statusClient) GetPgControlDataFromInstance( scheme := GetStatusSchemeFromPod(pod) httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPGControlData, url.StatusPort) - req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, httpURL, nil) if err != nil { return "", err } - r.Client.Timeout = defaultRequestTimeout - resp, err := r.Client.Do(req) + r.Timeout = defaultRequestTimeout + resp, err := r.Do(req) if err != nil { return "", err } @@ -213,7 +215,7 @@ func (r *statusClient) GetPgControlDataFromInstance( return "", err } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return "", &StatusError{StatusCode: resp.StatusCode, Body: string(body)} } @@ -231,7 +233,7 @@ func (r *statusClient) GetPgControlDataFromInstance( } // UpgradeInstanceManager upgrades the instance manager to the passed availableArchitecture -func (r *statusClient) UpgradeInstanceManager( +func (r *instanceClientImpl) UpgradeInstanceManager( ctx context.Context, pod *corev1.Pod, availableArchitecture *utils.AvailableArchitecture, @@ -256,8 +258,8 @@ func (r *statusClient) UpgradeInstanceManager( } req.Body = binaryFileStream - r.Client.Timeout = noRequestTimeout - resp, err := r.Client.Do(req) + r.Timeout = noRequestTimeout + resp, err := r.Do(req) // This is the desired response. The instance manager will // synchronously update and this call won't return. if isEOF(err) { @@ -293,20 +295,20 @@ func isEOF(err error) bool { } // rawInstanceStatusRequest retrieves the status of PostgreSQL pods via an HTTP request with GET method. 
-func (r *statusClient) rawInstanceStatusRequest( +func (r *instanceClientImpl) rawInstanceStatusRequest( ctx context.Context, pod corev1.Pod, ) (result postgres.PostgresqlStatus) { scheme := GetStatusSchemeFromPod(&pod) statusURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgStatus, url.StatusPort) - req, err := http.NewRequestWithContext(ctx, "GET", statusURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, statusURL, nil) if err != nil { result.Error = err return result } - r.Client.Timeout = defaultRequestTimeout - resp, err := r.Client.Do(req) + r.Timeout = defaultRequestTimeout + resp, err := r.Do(req) if err != nil { result.Error = err return result @@ -325,7 +327,7 @@ func (r *statusClient) rawInstanceStatusRequest( return result } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { result.Error = &StatusError{StatusCode: resp.StatusCode, Body: string(body)} return result } @@ -376,16 +378,16 @@ func GetStatusSchemeFromPod(pod *corev1.Pod) HTTPScheme { return schemeHTTP } -func (r *statusClient) ArchivePartialWAL(ctx context.Context, pod *corev1.Pod) (string, error) { +func (r *instanceClientImpl) ArchivePartialWAL(ctx context.Context, pod *corev1.Pod) (string, error) { contextLogger := log.FromContext(ctx) statusURL := url.Build( GetStatusSchemeFromPod(pod).ToString(), pod.Status.PodIP, url.PathPgArchivePartial, url.StatusPort) - req, err := http.NewRequestWithContext(ctx, "POST", statusURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, statusURL, nil) if err != nil { return "", err } - resp, err := r.Client.Do(req) + resp, err := r.Do(req) if err != nil { return "", err } @@ -401,7 +403,7 @@ func (r *statusClient) ArchivePartialWAL(ctx context.Context, pod *corev1.Pod) ( return "", err } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return "", &StatusError{StatusCode: resp.StatusCode, Body: string(body)} } diff --git a/pkg/management/postgres/webserver/client/remote/remote.go b/pkg/management/postgres/webserver/client/remote/remote.go new file mode 100644 index 0000000000..2a183f2bd6 --- /dev/null +++ b/pkg/management/postgres/webserver/client/remote/remote.go @@ -0,0 +1,56 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
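The extractInstancesStatus change in instance.go above pulls the Cluster out of the request context through a typed key (contextutils.ContextKeyCluster) and tolerates its absence. A generic sketch of that typed-context-key pattern; the key and types here are hypothetical:

    package main

    import (
        "context"
        "fmt"
    )

    type ctxKey string

    // keyCluster is a hypothetical key, standing in for contextutils.ContextKeyCluster.
    const keyCluster ctxKey = "cluster"

    type cluster struct{ CurrentPrimary string }

    func primaryFrom(ctx context.Context) (string, bool) {
        // the two-value assertion keeps a missing or mistyped value from panicking
        c, ok := ctx.Value(keyCluster).(*cluster)
        if !ok || c == nil {
            return "", false
        }
        return c.CurrentPrimary, true
    }

    func main() {
        ctx := context.WithValue(context.Background(), keyCluster, &cluster{CurrentPrimary: "pg-1"})
        fmt.Println(primaryFrom(ctx))                  // pg-1 true
        fmt.Println(primaryFrom(context.Background())) // "" false
    }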
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package remote + +import ( + "time" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common" +) + +// Client is the interface to interact with the remote webserver +type Client interface { + Instance() InstanceClient + Backup() BackupClient +} + +type remoteClientImpl struct { + instance InstanceClient + backup *backupClientImpl +} + +func (r *remoteClientImpl) Backup() BackupClient { + return r.backup +} + +func (r *remoteClientImpl) Instance() InstanceClient { + return r.instance +} + +// NewClient creates a new remote client +func NewClient() Client { + const connectionTimeout = 2 * time.Second + const requestTimeout = 10 * time.Second + + return &remoteClientImpl{ + instance: &instanceClientImpl{Client: common.NewHTTPClient(connectionTimeout, requestTimeout)}, + backup: &backupClientImpl{cli: common.NewHTTPClient(connectionTimeout, requestTimeout)}, + } +} diff --git a/pkg/management/postgres/webserver/client/remote/request.go b/pkg/management/postgres/webserver/client/remote/request.go new file mode 100644 index 0000000000..506c105ee8 --- /dev/null +++ b/pkg/management/postgres/webserver/client/remote/request.go @@ -0,0 +1,73 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
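The remote package above ends up as a small facade: NewClient wires one shared HTTP client into the instance and backup sub-clients. A sketch of the intended call shape (it compiles only inside this module; the request value is passed through opaquely, since its fields are defined elsewhere):

    package example

    import (
        "context"

        corev1 "k8s.io/api/core/v1"

        "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
        "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
    )

    // startBackup reaches the backup sub-client through the facade.
    func startBackup(ctx context.Context, pod *corev1.Pod, req webserver.StartBackupRequest) error {
        _, err := remote.NewClient().Backup().Start(ctx, pod, req)
        return err
    }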
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package remote
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
+)
+
+// executeRequestWithError executes an HTTP request and returns a webserver.Response and any error encountered
+func executeRequestWithError[T any](
+	ctx context.Context,
+	cli *http.Client,
+	req *http.Request,
+	ignoreBodyErrors bool,
+) (*webserver.Response[T], error) {
+	contextLogger := log.FromContext(ctx)
+
+	resp, err := cli.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("while executing http request: %w", err)
+	}
+
+	defer func() {
+		if err := resp.Body.Close(); err != nil {
+			contextLogger.Error(err, "while closing response body")
+		}
+	}()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("while reading the response body: %w", err)
+	}
+
+	if resp.StatusCode == http.StatusInternalServerError {
+		return nil, fmt.Errorf("encountered an internal server error status code 500 with body: %s", string(body))
+	}
+
+	var result webserver.Response[T]
+	if err := json.Unmarshal(body, &result); err != nil {
+		return nil, fmt.Errorf("while unmarshalling the body, body: %s err: %w", string(body), err)
+	}
+	if result.Error != nil && !ignoreBodyErrors {
+		return nil, fmt.Errorf("body contained an error code: %s and message: %s",
+			result.Error.Code, result.Error.Message)
+	}
+
+	return &result, nil
+}
diff --git a/pkg/management/postgres/webserver/doc.go b/pkg/management/postgres/webserver/doc.go
index 52f44cd5e7..0449655e5e 100644
--- a/pkg/management/postgres/webserver/doc.go
+++ b/pkg/management/postgres/webserver/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package webserver contains the web server powering probes, backups and metrics
diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go
index 89579382d6..80e046fcf7 100644
--- a/pkg/management/postgres/webserver/local.go
+++ b/pkg/management/postgres/webserver/local.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
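executeRequestWithError above uses a Go type parameter so a single helper can decode any endpoint's typed response envelope. A self-contained sketch of the same generic-decoding idea, with the envelope shape simplified:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // response is a simplified stand-in for webserver.Response[T]: one envelope
    // type, many payload types, selected by the caller via the type parameter.
    type response[T any] struct {
        Data  *T     `json:"data,omitempty"`
        Error string `json:"error,omitempty"`
    }

    func decode[T any](body []byte) (*response[T], error) {
        var r response[T]
        if err := json.Unmarshal(body, &r); err != nil {
            return nil, fmt.Errorf("while unmarshalling the body: %w", err)
        }
        return &r, nil
    }

    type backupData struct {
        Phase string `json:"phase"`
    }

    func main() {
        r, err := decode[backupData]([]byte(`{"data":{"phase":"started"}}`))
        fmt.Println(r.Data.Phase, err) // started <nil>
    }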
+ +SPDX-License-Identifier: Apache-2.0 */ package webserver @@ -26,6 +29,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -33,6 +37,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" ) type localWebserverEndpoints struct { @@ -56,6 +61,7 @@ func NewLocalWebServer( serveMux := http.NewServeMux() serveMux.HandleFunc(url.PathCache, endpoints.serveCache) serveMux.HandleFunc(url.PathPgBackup, endpoints.requestBackup) + serveMux.HandleFunc(url.PathWALArchiveStatusCondition, endpoints.setWALArchiveStatusCondition) server := &http.Server{ Addr: fmt.Sprintf("localhost:%d", url.LocalPort), @@ -78,12 +84,7 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req var js []byte switch requestedObject { case cache.ClusterKey: - var cluster apiv1.Cluster - err := ws.typedClient.Get( - r.Context(), - client.ObjectKey{Name: ws.instance.ClusterName, Namespace: ws.instance.Namespace}, - &cluster, - ) + cluster, err := ws.getCluster(r.Context()) if apierrs.IsNotFound(err) { w.WriteHeader(http.StatusNotFound) return @@ -93,7 +94,7 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req return } - js, err = json.Marshal(&cluster) + js, err = json.Marshal(cluster) if err != nil { log.Error(err, "while marshalling the cluster") w.WriteHeader(http.StatusInternalServerError) @@ -128,7 +129,6 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req // This function schedule a backup func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.Request) { - var cluster apiv1.Cluster var backup apiv1.Backup ctx := context.Background() @@ -139,10 +139,8 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. return } - if err := ws.typedClient.Get(ctx, client.ObjectKey{ - Namespace: ws.instance.Namespace, - Name: ws.instance.ClusterName, - }, &cluster); err != nil { + cluster, err := ws.getCluster(ctx) + if err != nil { http.Error( w, fmt.Sprintf("error while getting cluster: %v", err.Error()), @@ -151,7 +149,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. } if err := ws.typedClient.Get(ctx, client.ObjectKey{ - Namespace: ws.instance.Namespace, + Namespace: ws.instance.GetNamespaceName(), Name: backupName, }, &backup); err != nil { http.Error( @@ -168,7 +166,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. return } - if err := ws.startBarmanBackup(ctx, &cluster, &backup); err != nil { + if err := ws.startBarmanBackup(ctx, cluster, &backup); err != nil { http.Error( w, fmt.Sprintf("error while requesting backup: %v", err.Error()), @@ -183,7 +181,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. return } - ws.startPluginBackup(ctx, &cluster, &backup) + ws.startPluginBackup(ctx, cluster, &backup) _, _ = fmt.Fprint(w, "OK") default: @@ -194,6 +192,17 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. 
} } +func (ws *localWebserverEndpoints) getCluster(ctx context.Context) (*apiv1.Cluster, error) { + var cluster apiv1.Cluster + if err := ws.typedClient.Get(ctx, client.ObjectKey{ + Namespace: ws.instance.GetNamespaceName(), + Name: ws.instance.GetClusterName(), + }, &cluster); err != nil { + return nil, err + } + return &cluster, nil +} + func (ws *localWebserverEndpoints) startBarmanBackup( ctx context.Context, cluster *apiv1.Cluster, @@ -227,7 +236,66 @@ func (ws *localWebserverEndpoints) startPluginBackup( cluster *apiv1.Cluster, backup *apiv1.Backup, ) { - cmd := NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder) - cmd.Start(ctx) - cmd.Close() + NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder).Start(ctx) +} + +// ArchiveStatusRequest is the request body for the archive status endpoint +type ArchiveStatusRequest struct { + Error string `json:"error,omitempty"` +} + +func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() metav1.Condition { + if asr.Error != "" { + return metav1.Condition{ + Type: string(apiv1.ConditionContinuousArchiving), + Status: metav1.ConditionFalse, + Reason: string(apiv1.ConditionReasonContinuousArchivingFailing), + Message: asr.Error, + } + } + + return metav1.Condition{ + Type: string(apiv1.ConditionContinuousArchiving), + Status: metav1.ConditionTrue, + Reason: string(apiv1.ConditionReasonContinuousArchivingSuccess), + Message: "Continuous archiving is working", + } +} + +func (ws *localWebserverEndpoints) setWALArchiveStatusCondition(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + contextLogger := log.FromContext(ctx) + // decode body req + var asr ArchiveStatusRequest + if err := json.NewDecoder(r.Body).Decode(&asr); err != nil { + contextLogger.Error(err, "error while decoding request") + http.Error(w, fmt.Sprintf("error while decoding request: %v", err.Error()), http.StatusBadRequest) + return + } + + cluster, err := ws.getCluster(ctx) + if err != nil { + http.Error( + w, + fmt.Sprintf("error while getting cluster: %v", err.Error()), + http.StatusInternalServerError) + return + } + + if errCond := status.PatchConditionsWithOptimisticLock( + ctx, + ws.typedClient, + cluster, + asr.getContinuousArchivingCondition(), + ); errCond != nil { + contextLogger.Error(errCond, "Error changing wal archiving condition", + "condition", asr.getContinuousArchivingCondition()) + http.Error( + w, + fmt.Sprintf("error while updating wal archiving condition: %v", errCond.Error()), + http.StatusInternalServerError) + return + } + + _, _ = fmt.Fprint(w, "OK") } diff --git a/pkg/management/postgres/webserver/metricserver/doc.go b/pkg/management/postgres/webserver/metricserver/doc.go index 169837dcbc..8e2cfb22fc 100644 --- a/pkg/management/postgres/webserver/metricserver/doc.go +++ b/pkg/management/postgres/webserver/metricserver/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
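getContinuousArchivingCondition above folds the archiver outcome into a single metav1.Condition: an empty error message produces a True condition, anything else a False one carrying the message. A sketch of the two shapes it produces; the string literals stand in for the apiv1 constants used in the diff:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // condition mirrors getContinuousArchivingCondition's success/failure split.
    func condition(errMessage string) metav1.Condition {
        if errMessage != "" {
            return metav1.Condition{
                Type:    "ContinuousArchiving",
                Status:  metav1.ConditionFalse,
                Reason:  "ContinuousArchivingFailing",
                Message: errMessage,
            }
        }
        return metav1.Condition{
            Type:    "ContinuousArchiving",
            Status:  metav1.ConditionTrue,
            Reason:  "ContinuousArchivingSuccess",
            Message: "Continuous archiving is working",
        }
    }

    func main() {
        fmt.Println(condition("").Status)                   // True
        fmt.Println(condition("wal-archive failed").Status) // False
    }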
+ +SPDX-License-Identifier: Apache-2.0 */ // Package metricserver contains the web server powering metrics diff --git a/pkg/management/postgres/webserver/metricserver/metrics.go b/pkg/management/postgres/webserver/metricserver/metrics.go index 7e3ac339e7..454ed7f4ec 100644 --- a/pkg/management/postgres/webserver/metricserver/metrics.go +++ b/pkg/management/postgres/webserver/metricserver/metrics.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver @@ -63,7 +66,7 @@ func New(serverInstance *postgres.Instance, exporter *Exporter) (*MetricsServer, server.TLSConfig = &tls.Config{ MinVersion: tls.VersionTLS13, GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { - return serverInstance.ServerCertificate, nil + return serverInstance.GetServerCertificate(), nil }, } } diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index 9bfd6ab53f..876b26bd06 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
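The metrics.go change above fetches the serving certificate through an accessor on every TLS handshake, which lets the instance rotate certificates without restarting the server. A generic sketch of that GetCertificate pattern, independent of the instance type:

    package main

    import (
        "crypto/tls"
        "sync"
    )

    // certStore hands the current certificate to every new TLS handshake, so a
    // rotated certificate takes effect without restarting the listener.
    type certStore struct {
        mu   sync.RWMutex
        cert *tls.Certificate
    }

    func (s *certStore) get(*tls.ClientHelloInfo) (*tls.Certificate, error) {
        s.mu.RLock()
        defer s.mu.RUnlock()
        return s.cert, nil
    }

    func (s *certStore) rotate(cert *tls.Certificate) {
        s.mu.Lock()
        defer s.mu.Unlock()
        s.cert = cert
    }

    func main() {
        store := &certStore{}
        store.rotate(&tls.Certificate{}) // placeholder certificate
        _ = &tls.Config{
            MinVersion:     tls.VersionTLS13,
            GetCertificate: store.get,
        }
    }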
+ +SPDX-License-Identifier: Apache-2.0 */ package metricserver import ( + "context" "database/sql" "errors" "fmt" @@ -29,9 +33,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" m "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/metrics" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" postgresconf "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) @@ -40,7 +44,7 @@ import ( // or the operator const PrometheusNamespace = "cnpg" -var synchronousStandbyNamesRegex = regexp.MustCompile(`ANY ([0-9]+) \(.*\)`) +var synchronousStandbyNamesRegex = regexp.MustCompile(`(?:ANY|FIRST) ([0-9]+) \(.*\)`) // Exporter exports a set of metrics and collectors on a given postgres instance type Exporter struct { @@ -51,6 +55,9 @@ type Exporter struct { // - to ensure we are able to unit test // - to make the struct adhere to the composition pattern instead of hardcoding dependencies inside the functions getCluster func() (*apiv1.Cluster, error) + + // pluginCollector is used to collect metrics from plugins + pluginCollector m.PluginCollector } // metrics here are related to the exporter itself, which is instrumented to @@ -88,11 +95,13 @@ type PgStatWalMetrics struct { } // NewExporter creates an exporter -func NewExporter(instance *postgres.Instance) *Exporter { +func NewExporter(instance *postgres.Instance, pluginCollector m.PluginCollector) *Exporter { + clusterGetter := local.NewClient().Cache().GetCluster return &Exporter{ - instance: instance, - Metrics: newMetrics(), - getCluster: cacheClient.GetCluster, + instance: instance, + Metrics: newMetrics(), + getCluster: clusterGetter, + pluginCollector: pluginCollector, } } @@ -173,19 +182,20 @@ func newMetrics() *metrics { Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "first_recoverability_point", - Help: "The first point of recoverability for the cluster as a unix timestamp", + Help: "The first point of recoverability for the cluster as a unix timestamp" + + " (Deprecated)", }), LastAvailableBackupTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "last_available_backup_timestamp", - Help: "The last available backup as a unix timestamp", + Help: "The last available backup as a unix timestamp (Deprecated)", }), LastFailedBackupTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "last_failed_backup_timestamp", - Help: "The last failed backup as a unix timestamp", + Help: "The last failed backup as a unix timestamp (Deprecated)", }), FencingOn: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, @@ -232,7 +242,8 @@ func newMetrics() *metrics { Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "wal_write", - Help: "Number of times WAL buffers were written out to disk via XLogWrite request. Only available on PG 14+", + Help: "Number of times WAL buffers were written out to disk via XLogWrite request." 
+ + " Only available on PG 14 to 17.", }, []string{"stats_reset"}), WalSync: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, @@ -240,7 +251,7 @@ func newMetrics() *metrics { Name: "wal_sync", Help: "Number of times WAL files were synced to disk via issue_xlog_fsync request " + "(if fsync is on and wal_sync_method is either fdatasync, fsync or fsync_writethrough, otherwise zero)." + - " Only available on PG 14+", + " Only available on PG 14 to 17.", }, []string{"stats_reset"}), WalWriteTime: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, @@ -249,7 +260,7 @@ func newMetrics() *metrics { Help: "Total amount of time spent writing WAL buffers to disk via XLogWrite request, in milliseconds " + "(if track_wal_io_timing is enabled, otherwise zero). This includes the sync time when wal_sync_method " + "is either open_datasync or open_sync." + - " Only available on PG 14+", + " Only available on PG 14 to 17.", }, []string{"stats_reset"}), WalSyncTime: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, @@ -257,7 +268,7 @@ func newMetrics() *metrics { Name: "wal_sync_time", Help: "Total amount of time spent syncing WAL files to disk via issue_xlog_fsync request, in milliseconds " + "(if track_wal_io_timing is enabled, fsync is on, and wal_sync_method is either fdatasync, fsync or " + - "fsync_writethrough, otherwise zero). Only available on PG 14+", + "fsync_writethrough, otherwise zero). Only available on PG 14 to 17.", }, []string{"stats_reset"}), }, } @@ -287,14 +298,20 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { } if version, _ := e.instance.GetPgVersion(); version.Major >= 14 { - e.Metrics.PgStatWalMetrics.WalSync.Describe(ch) - e.Metrics.PgStatWalMetrics.WalWriteTime.Describe(ch) - e.Metrics.PgStatWalMetrics.WalFpi.Describe(ch) - e.Metrics.PgStatWalMetrics.WalWrite.Describe(ch) - e.Metrics.PgStatWalMetrics.WalSyncTime.Describe(ch) e.Metrics.PgStatWalMetrics.WalRecords.Describe(ch) - e.Metrics.PgStatWalMetrics.WALBuffersFull.Describe(ch) + e.Metrics.PgStatWalMetrics.WalFpi.Describe(ch) e.Metrics.PgStatWalMetrics.WalBytes.Describe(ch) + e.Metrics.PgStatWalMetrics.WALBuffersFull.Describe(ch) + if version.Major < 18 { + e.Metrics.PgStatWalMetrics.WalWrite.Describe(ch) + e.Metrics.PgStatWalMetrics.WalSync.Describe(ch) + e.Metrics.PgStatWalMetrics.WalWriteTime.Describe(ch) + e.Metrics.PgStatWalMetrics.WalSyncTime.Describe(ch) + } + } + + if cluster, _ := e.getCluster(); cluster != nil { + e.pluginCollector.Describe(context.Background(), ch, cluster) } } @@ -321,14 +338,24 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { e.Metrics.NodesUsed.Collect(ch) if version, _ := e.instance.GetPgVersion(); version.Major >= 14 { - e.Metrics.PgStatWalMetrics.WalSync.Collect(ch) - e.Metrics.PgStatWalMetrics.WalWriteTime.Collect(ch) - e.Metrics.PgStatWalMetrics.WalFpi.Collect(ch) - e.Metrics.PgStatWalMetrics.WalWrite.Collect(ch) - e.Metrics.PgStatWalMetrics.WalSyncTime.Collect(ch) e.Metrics.PgStatWalMetrics.WalRecords.Collect(ch) - e.Metrics.PgStatWalMetrics.WALBuffersFull.Collect(ch) + e.Metrics.PgStatWalMetrics.WalFpi.Collect(ch) e.Metrics.PgStatWalMetrics.WalBytes.Collect(ch) + e.Metrics.PgStatWalMetrics.WALBuffersFull.Collect(ch) + if version.Major < 18 { + e.Metrics.PgStatWalMetrics.WalWrite.Collect(ch) + e.Metrics.PgStatWalMetrics.WalSync.Collect(ch) + e.Metrics.PgStatWalMetrics.WalWriteTime.Collect(ch) + e.Metrics.PgStatWalMetrics.WalSyncTime.Collect(ch) + } + } + + if cluster, _ := e.getCluster(); 
cluster != nil { + if err := e.pluginCollector.Collect(context.Background(), ch, cluster); err != nil { + log.Error(err, "error while collecting plugin metrics") + e.Metrics.Error.Set(1) + e.Metrics.PgCollectionErrors.WithLabelValues("Collect.PluginMetrics").Inc() + } } } @@ -358,13 +385,13 @@ func (e *Exporter) collectPgMetrics(ch chan<- prometheus.Metric) { // First, let's check the connection. No need to proceed if this fails. if err := db.Ping(); err != nil { log.Warning("Unable to collect metrics", "error", err) - e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.ClusterName).Set(0) + e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.GetClusterName()).Set(0) e.Metrics.Error.Set(1) e.Metrics.CollectionDuration.WithLabelValues("Collect.up").Set(time.Since(collectionStart).Seconds()) return } - e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.ClusterName).Set(1) + e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.GetClusterName()).Set(1) e.Metrics.Error.Set(0) e.Metrics.CollectionDuration.WithLabelValues("Collect.up").Set(time.Since(collectionStart).Seconds()) @@ -423,8 +450,8 @@ func (e *Exporter) collectPgMetrics(ch chan<- prometheus.Metric) { } if version, _ := e.instance.GetPgVersion(); version.Major >= 14 { - if err := collectPGWALStat(e); err != nil { - log.Error(err, "while collecting pg_wal_stat") + if err := collectPGStatWAL(e); err != nil { + log.Error(err, "while collecting pg_stat_wal") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues("Collect.PGWALStat").Inc() } @@ -498,26 +525,26 @@ func (e *Exporter) collectNodesUsed() { func (e *Exporter) collectFromPrimaryLastFailedBackupTimestamp() { const errorLabel = "Collect.LastFailedBackupTimestamp" e.setTimestampMetric(e.Metrics.LastFailedBackupTimestamp, errorLabel, func(cluster *apiv1.Cluster) string { - return cluster.Status.LastFailedBackup + return cluster.Status.LastFailedBackup //nolint:staticcheck }) } func (e *Exporter) collectFromPrimaryLastAvailableBackupTimestamp() { const errorLabel = "Collect.LastAvailableBackupTimestamp" e.setTimestampMetric(e.Metrics.LastAvailableBackupTimestamp, errorLabel, func(cluster *apiv1.Cluster) string { - return cluster.Status.LastSuccessfulBackup + return cluster.Status.LastSuccessfulBackup //nolint:staticcheck }) } func (e *Exporter) collectFromPrimaryFirstPointOnTimeRecovery() { const errorLabel = "Collect.FirstRecoverabilityPoint" e.setTimestampMetric(e.Metrics.FirstRecoverabilityPoint, errorLabel, func(cluster *apiv1.Cluster) string { - return cluster.Status.FirstRecoverabilityPoint + return cluster.Status.FirstRecoverabilityPoint //nolint:staticcheck }) } func (e *Exporter) collectFromPrimarySynchronousStandbysNumber(db *sql.DB) { - nStandbys, err := getSynchronousStandbysNumber(db) + nStandbys, err := getRequestedSynchronousStandbysNumber(db) if err != nil { log.Error(err, "unable to collect metrics") e.Metrics.Error.Set(1) @@ -541,12 +568,15 @@ func collectPGVersion(e *Exporter) error { if err != nil { return err } - e.Metrics.PgVersion.WithLabelValues(majorMinor, e.instance.ClusterName).Set(version) + e.Metrics.PgVersion.WithLabelValues(majorMinor, e.instance.GetClusterName()).Set(version) return nil } -func getSynchronousStandbysNumber(db *sql.DB) (int, error) { +// getRequestedSynchronousStandbysNumber returns the number of requested synchronous standbys +// Example: FIRST 2 (node1,node2) will return 2, ANY 4 (node1) will return 4. +// If the query fails, it will return 0 and an error. 
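As an aside, a minimal self-contained sketch of the parsing behaviour this helper implements. The pattern below is only an illustration consistent with the test expectations later in this patch (the operator's real regex lives elsewhere in the codebase and is not shown here), and `parseRequestedSyncStandbys` is a hypothetical name:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Illustrative pattern: a leading FIRST or ANY, a count, and a parenthesised
// standby list. "ANY q (xx)" and "ANY 2 (xx, " fail to match, while
// `FIRST 2 ( "a","b")` yields 2, mirroring the tests below.
var syncStandbyNamesRe = regexp.MustCompile(`^\s*(?:FIRST|ANY)\s+(\d+)\s*\([^)]*\)\s*$`)

func parseRequestedSyncStandbys(value string) (int, error) {
	m := syncStandbyNamesRe.FindStringSubmatch(value)
	if m == nil {
		return 0, fmt.Errorf("not matching synchronous standby names regex: %s", value)
	}
	return strconv.Atoi(m[1])
}

func main() {
	n, _ := parseRequestedSyncStandbys(`FIRST 2 ( "a","b")`)
	fmt.Println(n) // 2
}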
+func getRequestedSynchronousStandbysNumber(db *sql.DB) (int, error) { var syncReplicasFromConfig string err := db.QueryRow(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)). Scan(&syncReplicasFromConfig) @@ -580,8 +610,8 @@ func (e *Exporter) SetCustomQueries(queries *m.QueriesCollector) { // DefaultQueries is the set of default queries for postgresql var DefaultQueries = m.UserQueries{ "collector": m.UserQuery{ - Query: "SELECT current_database() as datname, relpages as lo_pages " + - "FROM pg_class c JOIN pg_namespace n ON (n.oid = c.relnamespace) " + + Query: "SELECT pg_catalog.current_database() as datname, relpages as lo_pages " + + "FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON (n.oid = c.relnamespace) " + "WHERE n.nspname = 'pg_catalog' AND c.relname = 'pg_largeobject';", TargetDatabases: []string{"*"}, Metrics: []m.Mapping{ diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector_test.go b/pkg/management/postgres/webserver/metricserver/pg_collector_test.go index 509c5e297f..dd5c24eef9 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector_test.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver import ( + "context" "fmt" "time" @@ -33,13 +37,22 @@ import ( . "github.com/onsi/gomega" ) -var _ = Describe("ensure timestamp metric it's set properly", func() { +type fakePluginCollector struct{} + +func (f fakePluginCollector) Collect(context.Context, chan<- prometheus.Metric, *apiv1.Cluster) error { + return nil +} + +func (f fakePluginCollector) Describe(context.Context, chan<- *prometheus.Desc, *apiv1.Cluster) { +} + +var _ = Describe("test metrics parsing", func() { var exporter *Exporter BeforeEach(func() { cache.Delete(cache.ClusterKey) instance := postgres.NewInstance() - exporter = NewExporter(instance) + exporter = NewExporter(instance, fakePluginCollector{}) }) It("fails if there's no cluster in the cache", func() { @@ -97,12 +110,12 @@ var _ = Describe("ensure timestamp metric it's set properly", func() { } }) - It("It correctly parse the sync replicas", func() { + It("correctly parses the number of sync replicas when quorum-based", func() { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) rows := sqlmock.NewRows([]string{"synchronous_standby_names"}). 
- AddRow("ANY 2 ( \"cluster-example-2\",\"cluster-example-3\")") + AddRow(`ANY 2 ( "cluster-example-2","cluster-example-3")`) mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) exporter.collectFromPrimarySynchronousStandbysNumber(db) @@ -117,12 +130,58 @@ var _ = Describe("ensure timestamp metric it's set properly", func() { } }) - It("register -1 in case it can't parse the sync replicas string", func() { + It("correctly parses the number of sync replicas when preferential", func() { + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + rows := sqlmock.NewRows([]string{"synchronous_standby_names"}). + AddRow(`FIRST 2 ( "cluster-example-2","cluster-example-3")`) + mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) + + exporter.collectFromPrimarySynchronousStandbysNumber(db) + + registry := prometheus.NewRegistry() + registry.MustRegister(exporter.Metrics.SyncReplicas) + metrics, _ := registry.Gather() + + for _, metric := range metrics { + m := metric.GetMetric() + Expect(m[0].GetGauge().GetValue()).To(BeEquivalentTo(2)) + } + }) + + It("should return an error when encountering unexpected results", func() { + By("not matching the synchronous standby names regex", func() { + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + // This row will generate only two strings in the array + rows := sqlmock.NewRows([]string{"synchronous_standby_names"}).AddRow("ANY q (xx)") + mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) + _, err = getRequestedSynchronousStandbysNumber(db) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not matching synchronous standby names regex: ANY q (xx)")) + }) + + By("not matching the number of sync replicas", func() { + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + // This row will generate only two strings in the array + rows := sqlmock.NewRows([]string{"synchronous_standby_names"}).AddRow("ANY 2 (xx, ") + mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) + _, err = getRequestedSynchronousStandbysNumber(db) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not matching synchronous standby names regex: ANY 2 (xx")) + }) + }) + + It("sets the number of sync replicas as -1 if it can't parse the sync replicas string", func() { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) rows := sqlmock.NewRows([]string{"synchronous_standby_names"}). 
- AddRow("( \"cluster-example-2\",\"cluster-example-3\")") + AddRow(`( "cluster-example-2","cluster-example-3")`) mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) exporter.collectFromPrimarySynchronousStandbysNumber(db) diff --git a/pkg/management/postgres/webserver/metricserver/suite_test.go b/pkg/management/postgres/webserver/metricserver/suite_test.go index c4a0eab15b..c5dc36e28f 100644 --- a/pkg/management/postgres/webserver/metricserver/suite_test.go +++ b/pkg/management/postgres/webserver/metricserver/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver diff --git a/pkg/management/postgres/webserver/metricserver/wal.go b/pkg/management/postgres/webserver/metricserver/wal.go index 50280a21cc..4ced92fd05 100644 --- a/pkg/management/postgres/webserver/metricserver/wal.go +++ b/pkg/management/postgres/webserver/metricserver/wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package metricserver @@ -24,8 +27,8 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -41,20 +44,22 @@ func collectPGWalArchiveMetric(exporter *Exporter) error { return nil } -func collectPGWALStat(e *Exporter) error { +func collectPGStatWAL(e *Exporter) error { walStat, err := e.instance.TryGetPgStatWAL() if walStat == nil || err != nil { return err } walMetrics := e.Metrics.PgStatWalMetrics - walMetrics.WalSync.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalSync)) - walMetrics.WalSyncTime.WithLabelValues(walStat.StatsReset).Set(walStat.WalSyncTime) - walMetrics.WALBuffersFull.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WALBuffersFull)) + walMetrics.WalRecords.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalRecords)) walMetrics.WalFpi.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalFpi)) - walMetrics.WalWrite.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalWrite)) walMetrics.WalBytes.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalBytes)) - walMetrics.WalWriteTime.WithLabelValues(walStat.StatsReset).Set(walStat.WalWriteTime) - walMetrics.WalRecords.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalRecords)) + walMetrics.WALBuffersFull.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WALBuffersFull)) + if version, _ := e.instance.GetPgVersion(); version.Major < 18 { + walMetrics.WalWrite.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalWrite)) + walMetrics.WalSync.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalSync)) + walMetrics.WalWriteTime.WithLabelValues(walStat.StatsReset).Set(walStat.WalWriteTime) + walMetrics.WalSyncTime.WithLabelValues(walStat.StatsReset).Set(walStat.WalSyncTime) + } return nil } @@ -73,12 +78,12 @@ type walSettings struct { } func (s *walSettings) synchronize(db *sql.DB, configSha256 string) error { - if s.configSha256 == configSha256 { + if s.configSha256 != "" && s.configSha256 == configSha256 { return nil } rows, err := db.Query(` -SELECT name, setting FROM pg_settings +SELECT name, setting FROM pg_catalog.pg_settings WHERE pg_settings.name IN ('wal_segment_size', 'min_wal_size', 'max_wal_size', 'wal_keep_size', 'wal_keep_segments', 'max_slot_wal_keep_size')`) // nolint: lll if err != nil { @@ -220,7 +225,7 @@ func collectPGWalSettings(exporter *Exporter, db *sql.DB) error { } func getWalVolumeSize() float64 { - cluster, err := cacheClient.GetCluster() + cluster, err := local.NewClient().Cache().GetCluster() if err != nil || !cluster.ShouldCreateWalArchiveVolume() { return 0 } diff --git a/pkg/management/postgres/webserver/metricserver/wal_test.go b/pkg/management/postgres/webserver/metricserver/wal_test.go index 5685bfa262..f46642caf9 100644 --- a/pkg/management/postgres/webserver/metricserver/wal_test.go +++ b/pkg/management/postgres/webserver/metricserver/wal_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver @@ -38,7 +41,7 @@ var _ = Describe("ensures walSettings works correctly", func() { maxSlotWalKeepSize float64 = -1 walKeepSegments float64 = 25 query = ` -SELECT name, setting FROM pg_settings +SELECT name, setting FROM pg_catalog.pg_settings WHERE pg_settings.name IN ('wal_segment_size', 'min_wal_size', 'max_wal_size', 'wal_keep_size', 'wal_keep_segments', 'max_slot_wal_keep_size')` ) diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 0e253ab393..b8d3f05a96 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,16 +13,18 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package webserver import ( "context" - "time" + "fmt" "github.com/cloudnative-pg/machinery/pkg/log" - "k8s.io/apimachinery/pkg/api/meta" + "github.com/cloudnative-pg/machinery/pkg/stringset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" @@ -30,11 +33,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" ) // PluginBackupCommand represent a backup command that is being executed @@ -43,8 +44,6 @@ type PluginBackupCommand struct { Backup *apiv1.Backup Client client.Client Recorder record.EventRecorder - Log log.Logger - Plugins repository.Interface } // NewPluginBackupCommand initializes a BackupCommand object, taking a physical @@ -55,23 +54,13 @@ func NewPluginBackupCommand( client client.Client, recorder record.EventRecorder, ) *PluginBackupCommand { - logger := log.WithValues( - "pluginConfiguration", backup.Spec.PluginConfiguration, - "backupName", backup.Name, - "backupNamespace", backup.Name) - - plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { - logger.Error(err, "Error while discovering plugins") - } + backup.EnsureGVKIsPresent() return &PluginBackupCommand{ Cluster: cluster, Backup: backup, Client: client, Recorder: recorder, - Log: logger, - 
Plugins: plugins, } } @@ -80,26 +69,48 @@ func (b *PluginBackupCommand) Start(ctx context.Context) { go b.invokeStart(ctx) } -// Close closes all the connections to the plugins -func (b *PluginBackupCommand) Close() { - b.Plugins.Close() -} - func (b *PluginBackupCommand) invokeStart(ctx context.Context) { - backupLog := b.Log.WithValues( + contextLogger := log.FromContext(ctx).WithValues( + "pluginConfiguration", b.Backup.Spec.PluginConfiguration, "backupName", b.Backup.Name, - "backupNamespace", b.Backup.Name) + "backupNamespace", b.Backup.Namespace) - cli, err := pluginClient.WithPlugins(ctx, b.Plugins, b.Cluster.Spec.Plugins.GetEnabledPluginNames()...) + plugins := repository.New() + defer plugins.Close() + + enabledPluginNamesSet := stringset.New() + enabledPluginNamesSet.Put(b.Backup.Spec.PluginConfiguration.Name) + cli, err := pluginClient.NewClient( + ctx, + enabledPluginNamesSet, + ) if err != nil { b.markBackupAsFailed(ctx, err) return } + defer cli.Close(ctx) + + if !cli.HasPlugin(b.Backup.Spec.PluginConfiguration.Name) { + b.markBackupAsFailed( + ctx, + fmt.Errorf("requested plugin is not available: %s", b.Backup.Spec.PluginConfiguration.Name), + ) + return + } // record the backup beginning - backupLog.Info("Plugin backup started") + contextLogger.Info("Plugin backup started") b.Recorder.Event(b.Backup, "Normal", "Starting", "Backup started") + // Update backup status in cluster conditions on startup + if err := b.retryWithRefreshedCluster(ctx, func() error { + return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) + }); err != nil { + contextLogger.Error(err, "Error changing backup condition (backup started)") + // We do not terminate here because we could still have a good backup + // even if we are unable to communicate with the Kubernetes API server + } + response, err := cli.Backup( ctx, b.Cluster, @@ -111,7 +122,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { return } - backupLog.Info("Backup completed") + contextLogger.Info("Backup completed") b.Recorder.Event(b.Backup, "Normal", "Completed", "Backup completed") // Set the status to completed @@ -128,6 +139,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { b.Backup.Status.BackupLabelFile = response.BackupLabelFile b.Backup.Status.TablespaceMapFile = response.TablespaceMapFile b.Backup.Status.Online = ptr.To(response.Online) + b.Backup.Status.PluginMetadata = response.Metadata if !response.StartedAt.IsZero() { b.Backup.Status.StartedAt = ptr.To(metav1.NewTime(response.StartedAt)) @@ -137,42 +149,25 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { } if err := postgres.PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil { - backupLog.Error(err, "Can't set backup status as completed") + contextLogger.Error(err, "Can't set backup status as completed") } // Update backup status in cluster conditions on backup completion if err := b.retryWithRefreshedCluster(ctx, func() error { - return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) + return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) }); err != nil { - b.Log.Error(err, "Can't update the cluster with the completed backup data") + contextLogger.Error(err, "Can't update the cluster with the completed backup data") } } func (b *PluginBackupCommand) markBackupAsFailed(ctx context.Context, failure error) { - backupStatus := b.Backup.GetStatus() + contextLogger := 
log.FromContext(ctx) // record the failure - b.Log.Error(failure, "Backup failed") + contextLogger.Error(failure, "Backup failed") b.Recorder.Event(b.Backup, "Normal", "Failed", "Backup failed") - // update backup status as failed - backupStatus.SetAsFailed(failure) - if err := postgres.PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil { - b.Log.Error(err, "Can't mark backup as failed") - // We do not terminate here because we still want to set the condition on the cluster. - } - - // add backup failed condition to the cluster - if failErr := b.retryWithRefreshedCluster(ctx, func() error { - origCluster := b.Cluster.DeepCopy() - - meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(failure)) - - b.Cluster.Status.LastFailedBackup = utils.GetCurrentTimestampWithFormat(time.RFC3339) - return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) - }); failErr != nil { - b.Log.Error(failErr, "while setting cluster condition for failed backup") - } + _ = status.FlagBackupAsFailed(ctx, b.Client, b.Backup, b.Cluster, failure) } func (b *PluginBackupCommand) retryWithRefreshedCluster( diff --git a/pkg/management/postgres/webserver/probes/checker.go b/pkg/management/postgres/webserver/probes/checker.go new file mode 100644 index 0000000000..1ee4520021 --- /dev/null +++ b/pkg/management/postgres/webserver/probes/checker.go @@ -0,0 +1,155 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package probes + +import ( + "context" + "fmt" + "net/http" + + "github.com/cloudnative-pg/machinery/pkg/log" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +// probeType is the type of the probe +type probeType string + +const ( + // probeTypeReadiness is the readiness probe + probeTypeReadiness probeType = "readiness" + // probeTypeStartup is the startup probe + probeTypeStartup probeType = "startup" +) + +type runner interface { + // IsHealthy evaluates the status of PostgreSQL. 
If the probe is positive,
+	// it returns a nil error, otherwise the error status describes why
+	// the probe is failing
+	IsHealthy(ctx context.Context, instance *postgres.Instance) error
+}
+
+// Checker executes the probe and writes the response to the request
+type Checker interface {
+	IsHealthy(ctx context.Context, w http.ResponseWriter)
+}
+
+type executor struct {
+	cli       client.Client
+	instance  *postgres.Instance
+	probeType probeType
+}
+
+// NewReadinessChecker creates a new instance of the readiness probe checker
+func NewReadinessChecker(
+	cli client.Client,
+	instance *postgres.Instance,
+) Checker {
+	return &executor{
+		cli:       cli,
+		instance:  instance,
+		probeType: probeTypeReadiness,
+	}
+}
+
+// NewStartupChecker creates a new instance of the startup probe checker
+func NewStartupChecker(
+	cli client.Client,
+	instance *postgres.Instance,
+) Checker {
+	return &executor{
+		cli:       cli,
+		instance:  instance,
+		probeType: probeTypeStartup,
+	}
+}
+
+// IsHealthy executes the underlying probe logic and writes a response to the request according to the result obtained
+func (e *executor) IsHealthy(
+	ctx context.Context,
+	w http.ResponseWriter,
+) {
+	contextLogger := log.FromContext(ctx)
+
+	var cluster apiv1.Cluster
+	if err := e.cli.Get(
+		ctx,
+		client.ObjectKey{Namespace: e.instance.GetNamespaceName(), Name: e.instance.GetClusterName()},
+		&cluster,
+	); err != nil {
+		contextLogger.Warning(
+			fmt.Sprintf("%s check failed, cannot check Cluster definition", e.probeType),
+			"err", err.Error(),
+		)
+		http.Error(
+			w,
+			fmt.Sprintf("%s check failed, cannot get Cluster definition: %s", e.probeType, err.Error()),
+			http.StatusInternalServerError,
+		)
+		return
+	}
+
+	probeRunner := getProbeRunnerFromCluster(e.probeType, cluster)
+	if err := probeRunner.IsHealthy(ctx, e.instance); err != nil {
+		contextLogger.Warning(fmt.Sprintf("%s probe failing", e.probeType), "err", err.Error())
+		http.Error(
+			w,
+			fmt.Sprintf("%s check failed: %s", e.probeType, err.Error()),
+			http.StatusInternalServerError,
+		)
+		return
+	}
+
+	contextLogger.Trace(fmt.Sprintf("%s probe succeeding", e.probeType))
+	_, _ = fmt.Fprint(w, "OK")
+}
+
+func getProbeRunnerFromCluster(probeType probeType, cluster apiv1.Cluster) runner {
+	var probe *apiv1.ProbeWithStrategy
+	if cluster.Spec.Probes != nil {
+		switch probeType {
+		case probeTypeStartup:
+			probe = cluster.Spec.Probes.Startup
+
+		case probeTypeReadiness:
+			probe = cluster.Spec.Probes.Readiness
+		}
+	}
+
+	switch {
+	case probe == nil:
+		return pgIsReadyChecker{}
+	case probe.Type == apiv1.ProbeStrategyPgIsReady:
+		return pgIsReadyChecker{}
+	case probe.Type == apiv1.ProbeStrategyQuery:
+		return pgQueryChecker{}
+	case probe.Type == apiv1.ProbeStrategyStreaming:
+		result := pgStreamingChecker{}
+		if probe.MaximumLag != nil {
+			result.maximumLag = ptr.To(probe.MaximumLag.AsDec().UnscaledBig().Uint64())
+		}
+		return result
+	}
+
+	return pgIsReadyChecker{}
+}
diff --git a/pkg/management/postgres/webserver/probes/doc.go b/pkg/management/postgres/webserver/probes/doc.go
new file mode 100644
index 0000000000..778ef4aac3
--- /dev/null
+++ b/pkg/management/postgres/webserver/probes/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package probes contains the implementation of startup, liveness and
+// readiness probes
+package probes
diff --git a/pkg/configparser/environment.go b/pkg/management/postgres/webserver/probes/isready.go
similarity index 53%
rename from pkg/configparser/environment.go
rename to pkg/management/postgres/webserver/probes/isready.go
index 4c89740ce7..6a93055bf7 100644
--- a/pkg/configparser/environment.go
+++ b/pkg/management/postgres/webserver/probes/isready.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,23 +13,22 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package configparser
+package probes
 
 import (
-	"os"
-)
+	"context"
 
-// EnvironmentSource is an interface to identify an environment values source.
-type EnvironmentSource interface {
-	Getenv(key string) string
-}
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+)
 
-// OsEnvironment is an EnvironmentSource that fetch data from the OS environment.
-type OsEnvironment struct{}
+// pgIsReadyChecker checks if PostgreSQL is ready
+type pgIsReadyChecker struct{}
 
-// Getenv retrieves the value of the environment variable named by the key.
-func (OsEnvironment) Getenv(key string) string {
-	return os.Getenv(key)
+// IsHealthy implements the runner interface
+func (pgIsReadyChecker) IsHealthy(_ context.Context, instance *postgres.Instance) error {
+	return instance.IsReady()
 }
diff --git a/pkg/management/postgres/webserver/probes/liveness.go b/pkg/management/postgres/webserver/probes/liveness.go
new file mode 100644
index 0000000000..04e661e2a0
--- /dev/null
+++ b/pkg/management/postgres/webserver/probes/liveness.go
@@ -0,0 +1,189 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package probes
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+)
+
+type livenessExecutor struct {
+	cli      client.Client
+	instance *postgres.Instance
+
+	latestKnownCluster *apiv1.Cluster
+}
+
+// NewLivenessChecker creates a new instance of the liveness probe checker
+func NewLivenessChecker(
+	cli client.Client,
+	instance *postgres.Instance,
+) Checker {
+	return &livenessExecutor{
+		cli:      cli,
+		instance: instance,
+	}
+}
+
+// tryRefreshLatestClusterWithTimeout refreshes the latest cluster definition, returns a bool indicating if the
+// operation was successful
+func (e *livenessExecutor) tryRefreshLatestClusterWithTimeout(ctx context.Context, timeout time.Duration) bool {
+	timeoutContext, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+
+	var cluster apiv1.Cluster
+	err := e.cli.Get(
+		timeoutContext,
+		client.ObjectKey{Namespace: e.instance.GetNamespaceName(), Name: e.instance.GetClusterName()},
+		&cluster,
+	)
+	if err != nil {
+		return false
+	}
+
+	e.latestKnownCluster = cluster.DeepCopy()
+	return true
+}
+
+func (e *livenessExecutor) IsHealthy(
+	ctx context.Context,
+	w http.ResponseWriter,
+) {
+	contextLogger := log.FromContext(ctx)
+
+	isPrimary, isPrimaryErr := e.instance.IsPrimary()
+	if isPrimaryErr != nil {
+		contextLogger.Error(
+			isPrimaryErr,
+			"Error while checking the instance role, skipping automatic shutdown.")
+		_, _ = fmt.Fprint(w, "OK")
+		return
+	}
+
+	if !isPrimary {
+		// There's no need to restart a replica if isolated
+		_, _ = fmt.Fprint(w, "OK")
+		return
+	}
+
+	// We set a safe context timeout of 500ms to avoid a failed request from taking
+	// more time than the minimum configurable timeout (1s) of the container's livenessProbe,
+	// which otherwise could have triggered a restart of the instance.
+	if clusterRefreshed := e.tryRefreshLatestClusterWithTimeout(ctx, 500*time.Millisecond); clusterRefreshed {
+		// We correctly reached the API server but, as a failsafe measure, we
+		// exercise the reachability checker and leave a log message if something
+		// is not right.
+		// In this way a network configuration problem can be discovered as
+		// quickly as possible.
+		if err := evaluateLivenessPinger(ctx, e.latestKnownCluster.DeepCopy()); err != nil {
+			contextLogger.Warning(
+				"Instance connectivity error - liveness probe succeeding because "+
+					"the API server is reachable",
+				"err",
+				err.Error(),
+			)
+		}
+		_, _ = fmt.Fprint(w, "OK")
+		return
+	}
+
+	contextLogger = contextLogger.WithValues("apiServerReachable", false)
+
+	if e.latestKnownCluster == nil {
+		// We were never able to download a cluster definition. This should not
+		// happen because we check the API server connectivity as soon as the
+		// instance manager starts, before starting the probe web server.
+		//
+		// To be safe, we classify this instance manager to be not isolated and
+		// postpone any decision to a later liveness probe call.
+		contextLogger.Warning(
+			"No cluster definition has been received, skipping automatic shutdown.")
+
+		_, _ = fmt.Fprint(w, "OK")
+		return
+	}
+
+	err := evaluateLivenessPinger(ctx, e.latestKnownCluster.DeepCopy())
+	if err != nil {
+		contextLogger.Error(err, "Instance connectivity error - liveness probe failing")
+		http.Error(
+			w,
+			fmt.Sprintf("liveness check failed: %s", err.Error()),
+			http.StatusInternalServerError,
+		)
+		return
+	}
+
+	contextLogger.Debug(
+		"Instance connectivity test succeeded - liveness probe succeeding",
+		"latestKnownInstancesReportedState", e.latestKnownCluster.Status.InstancesReportedState,
+	)
+	_, _ = fmt.Fprint(w, "OK")
+}
+
+func evaluateLivenessPinger(
+	ctx context.Context,
+	cluster *apiv1.Cluster,
+) error {
+	contextLogger := log.FromContext(ctx)
+
+	var cfg *apiv1.IsolationCheckConfiguration
+	if cluster.Spec.Probes != nil && cluster.Spec.Probes.Liveness != nil {
+		cfg = cluster.Spec.Probes.Liveness.IsolationCheck
+	}
+	if cfg == nil {
+		return nil
+	}
+
+	// This should never happen given that we set a default value. Fail fast.
+	if cfg.Enabled == nil {
+		return errors.New("enabled field is not set in the liveness isolation check configuration")
+	}
+
+	if !*cfg.Enabled {
+		contextLogger.Debug("pinger config not enabled, skipping")
+		return nil
+	}
+
+	if cluster.Spec.Instances == 1 {
+		contextLogger.Debug("Only one instance present in the latest known cluster definition. Skipping automatic shutdown.")
+		return nil
+	}
+
+	checker, err := buildInstanceReachabilityChecker(cfg)
+	if err != nil {
+		return fmt.Errorf("failed to build instance reachability checker: %w", err)
+	}
+
+	if err := checker.ensureInstancesAreReachable(cluster); err != nil {
+		return fmt.Errorf("liveness check failed: %w", err)
+	}
+
+	return nil
+}
diff --git a/pkg/management/postgres/webserver/probes/pinger.go b/pkg/management/postgres/webserver/probes/pinger.go
new file mode 100644
index 0000000000..67fc2aaca1
--- /dev/null
+++ b/pkg/management/postgres/webserver/probes/pinger.go
@@ -0,0 +1,144 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package probes
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
+	cnpgUrl "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
+	postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+)
+
+// pinger can check if a certain instance is reachable by using
+// the failsafe REST endpoint
+type pinger struct {
+	dialer *net.Dialer
+	client *http.Client
+
+	config *apiv1.IsolationCheckConfiguration
+}
+
+// buildInstanceReachabilityChecker creates a new instance reachability checker by loading
+// the server CA certificate from the same location that will be used by PostgreSQL.
+// In this case, we avoid using the API Server as it may be unreliable. +func buildInstanceReachabilityChecker(cfg *apiv1.IsolationCheckConfiguration) (*pinger, error) { + if cfg == nil { + return nil, errors.New("isolation check configuration is nil") + } + + certificateLocation := postgresSpec.ServerCACertificateLocation + caCertificate, err := os.ReadFile(certificateLocation) //nolint:gosec + if err != nil { + return nil, fmt.Errorf("while reading server CA certificate [%s]: %w", certificateLocation, err) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCertificate) + + tlsConfig := certs.NewTLSConfigFromCertPool(caCertPool) + + dialer := &net.Dialer{Timeout: time.Duration(cfg.ConnectionTimeout) * time.Millisecond} + + client := http.Client{ + Transport: &http.Transport{ + DialContext: dialer.DialContext, + TLSClientConfig: tlsConfig, + }, + Timeout: time.Duration(cfg.RequestTimeout) * time.Millisecond, + } + + return &pinger{ + dialer: dialer, + client: &client, + config: cfg, + }, nil +} + +// ping checks if the instance with the passed coordinates is reachable +// by calling the failsafe endpoint. +func (e *pinger) ping(host, ip string) error { + failsafeURL := url.URL{ + Scheme: "https", + Host: fmt.Sprintf("%s:%d", ip, cnpgUrl.StatusPort), + Path: cnpgUrl.PathFailSafe, + } + + var res *http.Response + var err error + if res, err = e.client.Get(failsafeURL.String()); err != nil { + return &pingError{ + host: host, + ip: ip, + err: err, + config: e.config, + } + } + + _ = res.Body.Close() + + return nil +} + +func (e pinger) ensureInstancesAreReachable(cluster *apiv1.Cluster) error { + for name, state := range cluster.Status.InstancesReportedState { + host := string(name) + ip := state.IP + if err := e.ping(host, ip); err != nil { + return err + } + } + + return nil +} + +// pingError is raised when the instance connectivity test failed. +type pingError struct { + host string + ip string + + config *apiv1.IsolationCheckConfiguration + + err error +} + +// Error implements the error interface +func (e *pingError) Error() string { + return fmt.Sprintf( + "instance connectivity error for instance [%s] with ip [%s] (requestTimeout:%v connectionTimeout:%v): %s", + e.host, + e.ip, + e.config.RequestTimeout, + e.config.ConnectionTimeout, + e.err.Error()) +} + +// Unwrap implements the error interface +func (e *pingError) Unwrap() error { + return e.err +} diff --git a/pkg/management/postgres/webserver/probes/query.go b/pkg/management/postgres/webserver/probes/query.go new file mode 100644 index 0000000000..2e2bce5313 --- /dev/null +++ b/pkg/management/postgres/webserver/probes/query.go @@ -0,0 +1,44 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package probes + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +// pgQueryChecker checks if the PostgreSQL server can execute a simple query +type pgQueryChecker struct{} + +// IsHealthy implements the runner interface +func (c pgQueryChecker) IsHealthy(ctx context.Context, instance *postgres.Instance) error { + superUserDB, err := instance.GetSuperUserDB() + if err != nil { + return fmt.Errorf("while getting superuser connection pool: %w", err) + } + + if err := superUserDB.PingContext(ctx); err != nil { + return fmt.Errorf("while pinging database: %w", err) + } + + return nil +} diff --git a/pkg/management/postgres/webserver/probes/streaming.go b/pkg/management/postgres/webserver/probes/streaming.go new file mode 100644 index 0000000000..dc69ee0c44 --- /dev/null +++ b/pkg/management/postgres/webserver/probes/streaming.go @@ -0,0 +1,126 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package probes + +import ( + "context" + "fmt" + "math" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +// pgStreamingChecker checks if the replica is connected via streaming +// replication and, optionally, if the lag is within the specified maximum +type pgStreamingChecker struct { + maximumLag *uint64 +} + +// IsHealthy implements the runner interface +func (c pgStreamingChecker) IsHealthy(ctx context.Context, instance *postgres.Instance) error { + superUserDB, err := instance.GetSuperUserDB() + if err != nil { + return fmt.Errorf("while getting superuser connection pool: %w", err) + } + + var configuredLag uint64 = math.MaxUint64 + if c.maximumLag != nil { + configuredLag = *c.maximumLag + } + + // At this point, the instance is already running. + // The startup probe succeeds if the instance satisfies any of the following conditions: + // - It is a primary instance. + // - It is a log shipping replica (including a designated primary). + // - It is a streaming replica with replication lag below the specified threshold. + // If no lag threshold is specified, the startup probe succeeds if the replica has successfully connected + // to its source at least once. 
+ row := superUserDB.QueryRowContext( + ctx, + ` + WITH + lag AS ( + SELECT + (latest_end_lsn - pg_last_wal_replay_lsn()) AS value, + latest_end_time + FROM pg_catalog.pg_stat_wal_receiver + ) + SELECT + CASE + WHEN NOT pg_is_in_recovery() + THEN true + WHEN (SELECT coalesce(setting, '') = '' FROM pg_catalog.pg_settings WHERE name = 'primary_conninfo') + THEN true + WHEN (SELECT value FROM lag) <= $1 + THEN true + ELSE false + END AS ready_to_start, + COALESCE((SELECT value FROM lag), 0) AS lag, + COALESCE((SELECT latest_end_time FROM lag), '-infinity') AS latest_end_time + `, + configuredLag, + ) + if err := row.Err(); err != nil { + return fmt.Errorf("streaming replication check failed: %w", err) + } + + var status bool + var detectedLag uint64 + var latestEndTime string + if err := row.Scan(&status, &detectedLag, &latestEndTime); err != nil { + return fmt.Errorf("streaming replication check failed (scan): %w", err) + } + + if !status { + if detectedLag > configuredLag { + return &ReplicaLaggingError{ + DetectedLag: detectedLag, + ConfiguredLag: configuredLag, + LatestEndTime: latestEndTime, + } + } + return fmt.Errorf("replica not connected via streaming replication") + } + + return nil +} + +// ReplicaLaggingError is raised when a replica is lagging more +// than the configured cap +type ReplicaLaggingError struct { + // DetectedLag is the lag that was detected + DetectedLag uint64 + + // ConfiguredLag is the lag as configured in the probe + ConfiguredLag uint64 + + // LatestEndTime is the time of last write-ahead log location reported to + // origin WAL sender + LatestEndTime string +} + +func (e *ReplicaLaggingError) Error() string { + return fmt.Sprintf( + "streaming replica lagging; detectedLag=%v configuredLag=%v latestEndTime=%s", + e.DetectedLag, + e.ConfiguredLag, + e.LatestEndTime, + ) +} diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index 231f6dccd0..13163c3de4 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package webserver @@ -27,10 +30,14 @@ import ( "os" "os/exec" "path" + "sync" + "time" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + "go.uber.org/multierr" + apierrs "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -38,17 +45,29 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/readiness" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/probes" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/upgrade" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) +const errCodeAnotherRequestInProgress = "ANOTHER_REQUEST_IN_PROGRESS" + +// IsRetryableError checks if the error is retryable +func IsRetryableError(err *Error) bool { + if err == nil { + return false + } + return err.Code == errCodeAnotherRequestInProgress +} + type remoteWebserverEndpoints struct { - typedClient client.Client - instance *postgres.Instance - currentBackup *backupConnection - readinessChecker *readiness.Data + typedClient client.Client + instance *postgres.Instance + currentBackup *backupConnection + ongoingBackupRequest sync.Mutex + // livenessChecker is a stateful probe + livenessChecker probes.Checker } // StartBackupRequest the required data to execute the pg_start_backup @@ -56,7 +75,6 @@ type StartBackupRequest struct { ImmediateCheckpoint bool `json:"immediateCheckpoint"` WaitForArchive bool `json:"waitForArchive"` BackupName string `json:"backupName"` - Force bool `json:"force,omitempty"` } // StopBackupRequest the required data to execute the pg_stop_backup @@ -81,15 +99,17 @@ func NewRemoteWebServer( } endpoints := remoteWebserverEndpoints{ - typedClient: typedClient, - instance: instance, - readinessChecker: readiness.ForInstance(instance), + typedClient: typedClient, + instance: instance, + livenessChecker: probes.NewLivenessChecker(typedClient, instance), } serveMux := http.NewServeMux() + serveMux.HandleFunc(url.PathFailSafe, endpoints.failSafe) serveMux.HandleFunc(url.PathPgModeBackup, endpoints.backup) serveMux.HandleFunc(url.PathHealth, endpoints.isServerHealthy) serveMux.HandleFunc(url.PathReady, endpoints.isServerReady) + serveMux.HandleFunc(url.PathStartup, endpoints.isServerStartedUp) serveMux.HandleFunc(url.PathPgStatus, endpoints.pgStatus) serveMux.HandleFunc(url.PathPgArchivePartial, endpoints.pgArchivePartial) serveMux.HandleFunc(url.PathPGControlData, endpoints.pgControlData) @@ -106,45 +126,125 @@ func NewRemoteWebServer( server.TLSConfig = &tls.Config{ MinVersion: tls.VersionTLS13, GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { - return instance.ServerCertificate, nil + return instance.GetServerCertificate(), nil }, } } - return NewWebServer(server), nil + srv := NewWebServer(server) + + srv.routines = append(srv.routines, endpoints.cleanupStaleCollections) + + return srv, nil } -func (ws *remoteWebserverEndpoints) isServerHealthy(w http.ResponseWriter, _ *http.Request) { - // If `pg_rewind` is running the Pod is starting up. 
+func (ws *remoteWebserverEndpoints) cleanupStaleCollections(ctx context.Context) {
+	closeBackupConnection := func(bc *backupConnection) {
+		log := log.WithValues(
+			"backupName", bc.data.BackupName,
+			"phase", bc.data.Phase,
+		)
+		log.Warning("Closing stale PostgreSQL backup connection")
+
+		if err := bc.conn.Close(); err != nil {
+			bc.err = multierr.Append(bc.err, err)
+			log.Error(err, "Error while closing stale PostgreSQL backup connection")
+		}
+		bc.data.Phase = Completed
+	}
+
+	innerRoutine := func() {
+		if ws == nil {
+			return
+		}
+		bc := ws.currentBackup
+		if bc == nil || bc.conn == nil {
+			return
+		}
+
+		ws.ongoingBackupRequest.Lock()
+		defer ws.ongoingBackupRequest.Unlock()
+
+		if bc.data.Phase == Completed || bc.data.BackupName == "" {
+			return
+		}
+
+		if bc.err != nil {
+			closeBackupConnection(bc)
+			return
+		}
+
+		if err := bc.conn.PingContext(ctx); err != nil {
+			bc.err = fmt.Errorf("error while pinging: %w", err)
+			closeBackupConnection(bc)
+			return
+		}
+
+		var backup apiv1.Backup
+
+		err := ws.typedClient.Get(ctx, client.ObjectKey{
+			Namespace: ws.instance.GetNamespaceName(),
+			Name:      bc.data.BackupName,
+		}, &backup)
+		if apierrs.IsNotFound(err) {
+			bc.err = fmt.Errorf("backup %s not found", bc.data.BackupName)
+			closeBackupConnection(bc)
+			return
+		}
+		if err != nil {
+			return
+		}
+
+		if backup.Status.IsDone() {
+			bc.err = fmt.Errorf("backup %s is done", bc.data.BackupName)
+			closeBackupConnection(bc)
+			return
+		}
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(1 * time.Minute):
+			innerRoutine()
+		}
+	}
+}
+
+// isServerStartedUp evaluates the startup probe
+func (ws *remoteWebserverEndpoints) isServerStartedUp(w http.ResponseWriter, req *http.Request) {
+	// If `pg_rewind` is running, it means that the Pod is starting up.
 	// We need to report it healthy to avoid being killed by the kubelet.
-	// Same goes for instances with fencing on.
if ws.instance.PgRewindIsRunning || ws.instance.MightBeUnavailable() { - log.Trace("Liveness probe skipped") + log.Trace("Startup probe skipped") _, _ = fmt.Fprint(w, "Skipped") return } - err := ws.instance.IsServerHealthy() - if err != nil { - log.Debug("Liveness probe failing", "err", err.Error()) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } + checker := probes.NewStartupChecker(ws.typedClient, ws.instance) + checker.IsHealthy(req.Context(), w) +} - log.Trace("Liveness probe succeeding") +// This is the failsafe entrypoint +func (ws *remoteWebserverEndpoints) failSafe(w http.ResponseWriter, _ *http.Request) { _, _ = fmt.Fprint(w, "OK") } +// This is the liveness probe +func (ws *remoteWebserverEndpoints) isServerHealthy(w http.ResponseWriter, req *http.Request) { + ws.livenessChecker.IsHealthy(req.Context(), w) +} + // This is the readiness probe -func (ws *remoteWebserverEndpoints) isServerReady(w http.ResponseWriter, r *http.Request) { - if err := ws.readinessChecker.IsServerReady(r.Context()); err != nil { - log.Debug("Readiness probe failing", "err", err.Error()) - http.Error(w, err.Error(), http.StatusInternalServerError) +func (ws *remoteWebserverEndpoints) isServerReady(w http.ResponseWriter, req *http.Request) { + if !ws.instance.CanCheckReadiness() { + http.Error(w, "instance is not ready yet", http.StatusInternalServerError) return } - log.Trace("Readiness probe succeeding") - _, _ = fmt.Fprint(w, "OK") + checker := probes.NewReadinessChecker(ws.typedClient, ws.instance) + checker.IsHealthy(req.Context(), w) } // This probe is for the instance status, including replication @@ -239,6 +339,11 @@ func (ws *remoteWebserverEndpoints) updateInstanceManager( // nolint: gocognit func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Request) { log.Trace("request method", "method", req.Method) + if !ws.ongoingBackupRequest.TryLock() { + sendUnprocessableEntityJSONResponse(w, errCodeAnotherRequestInProgress, "") + return + } + defer ws.ongoingBackupRequest.Unlock() switch req.Method { case http.MethodGet: @@ -273,11 +378,10 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ } }() if ws.currentBackup != nil { - if !p.Force { - sendUnprocessableEntityJSONResponse(w, "PROCESS_ALREADY_RUNNING", "") - return - } - if err := ws.currentBackup.closeConnection(p.BackupName); err != nil { + log.Debug("trying to close the current backup connection", + "backupName", ws.currentBackup.data.BackupName, + ) + if err := ws.currentBackup.conn.Close(); err != nil { if !errors.Is(err, sql.ErrConnDone) { log.Error(err, "Error while closing backup connection (start)") } @@ -294,8 +398,12 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ sendUnprocessableEntityJSONResponse(w, "CANNOT_INITIALIZE_CONNECTION", err.Error()) return } - go ws.currentBackup.startBackup(context.Background(), p.BackupName) - sendJSONResponseWithData(w, 200, struct{}{}) + go ws.currentBackup.startBackup(context.Background(), &ws.ongoingBackupRequest) + + res := Response[BackupResultData]{ + Data: &ws.currentBackup.data, + } + sendJSONResponseWithData(w, 200, res) return case http.MethodPut: @@ -321,8 +429,23 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ return } + if ws.currentBackup.err != nil { + if err := ws.currentBackup.conn.Close(); err != nil { + if !errors.Is(err, sql.ErrConnDone) { + log.Error(err, "Error while closing backup connection (stop)") + } + } + + 
sendUnprocessableEntityJSONResponse(w, "BACKUP_FAILED", ws.currentBackup.err.Error())
+			return
+		}
+
+		res := Response[BackupResultData]{
+			Data: &ws.currentBackup.data,
+		}
+
 		if ws.currentBackup.data.Phase == Closing {
-			sendJSONResponseWithData(w, 200, struct{}{})
+			sendJSONResponseWithData(w, 200, res)
 			return
 		}
 
@@ -332,19 +455,10 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ
 			return
 		}
 
-		if ws.currentBackup.err != nil {
-			if err := ws.currentBackup.closeConnection(p.BackupName); err != nil {
-				if !errors.Is(err, sql.ErrConnDone) {
-					log.Error(err, "Error while closing backup connection (stop)")
-				}
-			}
+		ws.currentBackup.data.Phase = Closing
 
-			sendJSONResponseWithData(w, 200, struct{}{})
-			return
-		}
-		ws.currentBackup.setPhase(Closing, p.BackupName)
-		go ws.currentBackup.stopBackup(context.Background(), p.BackupName)
-		sendJSONResponseWithData(w, 200, struct{}{})
+		go ws.currentBackup.stopBackup(context.Background(), &ws.ongoingBackupRequest)
+		sendJSONResponseWithData(w, 200, res)
 		return
 	}
 }
@@ -357,13 +471,17 @@ func (ws *remoteWebserverEndpoints) pgArchivePartial(w http.ResponseWriter, req
 
 	var cluster apiv1.Cluster
 	if err := ws.typedClient.Get(req.Context(),
-		client.ObjectKey{Namespace: ws.instance.Namespace, Name: ws.instance.ClusterName},
+		client.ObjectKey{
+			Namespace: ws.instance.GetNamespaceName(),
+			Name:      ws.instance.GetClusterName(),
+		},
 		&cluster); err != nil {
 		sendBadRequestJSONResponse(w, "NO_CLUSTER_FOUND", err.Error())
 		return
 	}
 
-	if cluster.Status.TargetPrimary != ws.instance.PodName || cluster.Status.CurrentPrimary != ws.instance.PodName {
+	if cluster.Status.TargetPrimary != ws.instance.GetPodName() ||
+		cluster.Status.CurrentPrimary != ws.instance.GetPodName() {
 		sendBadRequestJSONResponse(w, "NOT_EXPECTED_PRIMARY", "")
 		return
 	}
@@ -376,30 +494,33 @@ func (ws *remoteWebserverEndpoints) pgArchivePartial(w http.ResponseWriter, req
 	}
 
 	data := utils.ParsePgControldataOutput(out)
-	walFile := data[utils.PgControlDataKeyREDOWALFile]
+	walFile := data.GetREDOWALFile()
 	if walFile == "" {
 		sendBadRequestJSONResponse(w, "COULD_NOT_PARSE_REDOWAL_FILE", "")
 		return
 	}
 
-	pgWalDirectory := path.Join(os.Getenv("PGDATA"), "pg_wal")
-	walFilPath := path.Join(pgWalDirectory, walFile)
-	partialWalFilePath := fmt.Sprintf("%s.partial", walFilPath)
+	pgData := os.Getenv("PGDATA")
+	walRelativePath := path.Join("pg_wal", walFile)
+	partialWalFileRelativePath := fmt.Sprintf("%s.partial", walRelativePath)
+	walFileAbsolutePath := path.Join(pgData, walRelativePath)
+	partialWalFileAbsolutePath := path.Join(pgData, partialWalFileRelativePath)
 
-	if err := os.Link(walFilPath, partialWalFilePath); err != nil {
+	if err := os.Link(walFileAbsolutePath, partialWalFileAbsolutePath); err != nil {
 		log.Error(err, "failed to create the partial WAL file link")
 		sendBadRequestJSONResponse(w, "ERROR_WHILE_CREATING_SYMLINK", err.Error())
 		return
 	}
 
 	defer func() {
-		if err := fileutils.RemoveFile(partialWalFilePath); err != nil {
+		if err := fileutils.RemoveFile(partialWalFileAbsolutePath); err != nil {
 			log.Error(err, "while deleting the partial WAL file link")
 		}
 	}()
 
-	options := []string{constants.WalArchiveCommand, partialWalFilePath}
+	options := []string{constants.WalArchiveCommand, partialWalFileRelativePath}
 	walArchiveCmd := exec.Command("/controller/manager", options...)
// nolint: gosec
+	walArchiveCmd.Dir = pgData
 	if err := execlog.RunBuffering(walArchiveCmd, "wal-archive-partial"); err != nil {
 		sendBadRequestJSONResponse(w, "ERROR_WHILE_EXECUTING_WAL_ARCHIVE", err.Error())
 		return
diff --git a/pkg/management/postgres/webserver/webserver.go b/pkg/management/postgres/webserver/webserver.go
index 1fcf09916b..f69c5db605 100644
--- a/pkg/management/postgres/webserver/webserver.go
+++ b/pkg/management/postgres/webserver/webserver.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package webserver
@@ -50,24 +53,26 @@ type Response[T interface{}] struct {
 	Error *Error `json:"error,omitempty"`
 }
 
-// EnsureDataIsPresent returns an error if the data field is nil
-func (body Response[T]) EnsureDataIsPresent() error {
-	status := body.Data
-	if status != nil {
-		return nil
-	}
-
+// GetError returns an error if an error response is detected or if the data
+// field is nil
+func (body Response[T]) GetError() error {
 	if body.Error != nil {
 		return fmt.Errorf("encountered a body error while preparing, code: '%s', message: %s",
 			body.Error.Code, body.Error.Message)
 	}
 
+	status := body.Data
+	if status != nil {
+		return nil
+	}
+
 	return fmt.Errorf("encountered an empty body while expecting it not to be empty")
 }
 
 // Webserver wraps a webserver to make it a kubernetes Runnable
 type Webserver struct {
-	server *http.Server
+	server   *http.Server
+	routines []func(ctx context.Context)
 }
 
 // NewWebServer creates a Webserver as a Kubernetes Runnable, given a http.Server
@@ -79,9 +84,11 @@
 // Start starts a webserver listener, implementing the K8s runnable interface
 func (ws *Webserver) Start(ctx context.Context) error {
+	contextLogger := log.FromContext(ctx)
+
 	errChan := make(chan error, 1)
 	go func() {
-		log.Info("Starting webserver", "address", ws.server.Addr, "hasTLS", ws.server.TLSConfig != nil)
+		contextLogger.Info("Starting webserver", "address", ws.server.Addr, "hasTLS", ws.server.TLSConfig != nil)
 
 		var err error
 		if ws.server.TLSConfig != nil {
@@ -94,24 +101,31 @@
 		}
 	}()
 
+	subCtx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	for _, routine := range ws.routines {
+		go routine(subCtx)
+	}
+
 	select {
 	// we exit with error code, potentially we could do a retry logic, but rarely a webserver that doesn't start will run
 	// on subsequent tries
 	case err := <-errChan:
 		if errors.Is(err, http.ErrServerClosed) {
-			log.Error(err, "Closing the web server", "address", ws.server.Addr)
+			contextLogger.Error(err, "Closing the web server", "address", ws.server.Addr)
 		} else {
-			log.Error(err, "Error while running the web server", "address", ws.server.Addr)
+			contextLogger.Error(err, "Error while running the web server", "address", ws.server.Addr)
 		}
 		return err
 	case <-ctx.Done():
 		if err := ws.server.Shutdown(context.Background()); err != nil {
-			log.Error(err, "Error while shutting down the web server", "address", ws.server.Addr)
+
contextLogger.Error(err, "Error while shutting down the web server", "address", ws.server.Addr) return err } } - log.Info("Webserver exited", "address", ws.server.Addr) + contextLogger.Info("Webserver exited", "address", ws.server.Addr) return nil } diff --git a/pkg/management/upgrade/suite_test.go b/pkg/management/upgrade/suite_test.go index 4b84498c71..a5ac4c0595 100644 --- a/pkg/management/upgrade/suite_test.go +++ b/pkg/management/upgrade/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package upgrade diff --git a/pkg/management/upgrade/upgrade.go b/pkg/management/upgrade/upgrade.go index 8c8ce4ff13..972bc03f6a 100644 --- a/pkg/management/upgrade/upgrade.go +++ b/pkg/management/upgrade/upgrade.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package upgrade manages the in-place upgrade of the instance manager @@ -69,11 +72,6 @@ func FromReader( "name", updatedInstanceManager.Name(), "err", err) } }() - // Gather the status of the instance - instanceStatus, err := instance.GetStatus() - if err != nil { - return fmt.Errorf("while retrieving instance's status: %w", err) - } // Read the new instance manager version newHash, err := downloadAndCloseInstanceManagerBinary(updatedInstanceManager, r) @@ -82,8 +80,9 @@ func FromReader( } // Validate the hash of this instance manager - if err := validateInstanceManagerHash(typedClient, instance.ClusterName, instance.Namespace, - instanceStatus.InstanceArch, newHash); err != nil { + if err := validateInstanceManagerHash(typedClient, + instance.GetClusterName(), instance.GetNamespaceName(), + instance.GetArchitecture(), newHash); err != nil { return fmt.Errorf("while validating instance manager binary: %w", err) } diff --git a/pkg/management/upgrade/upgrade_test.go b/pkg/management/upgrade/upgrade_test.go index 6bb7e4d9ea..8a13741da4 100644 --- a/pkg/management/upgrade/upgrade_test.go +++ b/pkg/management/upgrade/upgrade_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
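An aside on the pattern above: the reworked Start method in pkg/management/postgres/webserver/webserver.go follows the controller-runtime Runnable convention, serving in a goroutine while the method itself blocks until the server fails or the context is cancelled, and tying the new background routines to a child context. A minimal, self-contained sketch of the same shape; the name runWebserver and the five-second grace period are illustrative, not part of this patch:

package sketch

import (
	"context"
	"errors"
	"net/http"
	"time"
)

// runWebserver serves in a goroutine, runs background routines tied to
// the server's lifetime, and blocks until failure or cancellation.
func runWebserver(ctx context.Context, srv *http.Server, routines ...func(context.Context)) error {
	errChan := make(chan error, 1)
	go func() {
		errChan <- srv.ListenAndServe()
	}()

	// Child routines are cancelled as soon as this function returns.
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	for _, routine := range routines {
		go routine(subCtx)
	}

	select {
	case err := <-errChan:
		if errors.Is(err, http.ErrServerClosed) {
			return nil
		}
		return err
	case <-ctx.Done():
		// Give in-flight requests a grace period to complete.
		shutdownCtx, stop := context.WithTimeout(context.Background(), 5*time.Second)
		defer stop()
		return srv.Shutdown(shutdownCtx)
	}
}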
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package upgrade diff --git a/pkg/management/url/url.go b/pkg/management/url/url.go index b72e89d9d1..b3157163fd 100644 --- a/pkg/management/url/url.go +++ b/pkg/management/url/url.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package url holds the constants for webserver routing @@ -31,18 +34,27 @@ const ( // PgBouncerMetricsPort is the port for the exporter of PgBouncer related metrics (HTTP) PgBouncerMetricsPort int32 = 9127 + // PathFailSafe is the path for the failsafe entrypoint + PathFailSafe string = "/failsafe" + // PathHealth is the URL path for Health State PathHealth string = "/healthz" - // PathReady is the URL oath for Ready State + // PathReady is the URL path for Ready State PathReady string = "/readyz" + // PathStartup is the URL path for the Startup probe + PathStartup string = "/startupz" + // PathPGControlData is the URL path for PostgreSQL pg_controldata output PathPGControlData string = "/pg/controldata" // PathPgStatus is the URL path for PostgreSQL Status PathPgStatus string = "/pg/status" + // PathWALArchiveStatusCondition is the URL path for setting the wal-archive condition on the Cluster object + PathWALArchiveStatusCondition string = "/cluster/status/condition/wal/archive" + // PathPgBackup is the URL path for PostgreSQL Backup PathPgBackup string = "/pg/backup" diff --git a/pkg/multicache/multinamespaced_cache.go b/pkg/multicache/multinamespaced_cache.go index 80f69eecfb..6998327db9 100644 --- a/pkg/multicache/multinamespaced_cache.go +++ b/pkg/multicache/multinamespaced_cache.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
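The probe paths added to pkg/management/url above are plain route constants; this patch does not change how they are mounted. As a rough sketch of how such constants are typically bound to a mux (the handler bodies below are placeholders, not the operator's real probe logic):

package sketch

import (
	"fmt"
	"net/http"
)

// Route constants mirroring the ones added above.
const (
	pathHealth   = "/healthz"
	pathReady    = "/readyz"
	pathStartup  = "/startupz"
	pathFailSafe = "/failsafe"
)

func newProbeMux() *http.ServeMux {
	mux := http.NewServeMux()
	// The failsafe endpoint only proves the process is alive;
	// it never inspects PostgreSQL.
	mux.HandleFunc(pathFailSafe, func(w http.ResponseWriter, _ *http.Request) {
		_, _ = fmt.Fprint(w, "OK")
	})
	mux.HandleFunc(pathStartup, func(w http.ResponseWriter, _ *http.Request) {
		// a startup checker would run here; placeholder only
		_, _ = fmt.Fprint(w, "OK")
	})
	return mux
}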
+ +SPDX-License-Identifier: Apache-2.0 */ // Package multicache implements a cache that is able to work on multiple namespaces but also able to @@ -24,12 +27,11 @@ import ( "fmt" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) type multiNamespaceCache struct { diff --git a/pkg/utils/logs/cluster_logs.go b/pkg/podlogs/cluster_writer.go similarity index 66% rename from pkg/utils/logs/cluster_logs.go rename to pkg/podlogs/cluster_writer.go index 84e45f47eb..bd235661b3 100644 --- a/pkg/utils/logs/cluster_logs.go +++ b/pkg/podlogs/cluster_writer.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,18 +13,22 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package logs +package podlogs import ( + "bufio" "context" + "fmt" "io" "log" "sync" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -37,14 +42,14 @@ import ( // wait before searching again for new cluster pods const DefaultFollowWaiting time.Duration = 1 * time.Second -// ClusterStreamingRequest represents a request to stream a cluster's pod logs +// ClusterWriter represents a request to stream a cluster's pod logs. 
// // If the Follow Option is set to true, streaming will sit in a loop looking -// for any new / regenerated pods, and will only exit when there are no pods +// for any new / regenerated pods and will only exit when there are no pods // streaming -type ClusterStreamingRequest struct { +type ClusterWriter struct { Cluster *apiv1.Cluster - Options *v1.PodLogOptions + Options *corev1.PodLogOptions Previous bool `json:"previous,omitempty"` FollowWaiting time.Duration // NOTE: the Client argument may be omitted, but it is good practice to pass it @@ -52,23 +57,28 @@ type ClusterStreamingRequest struct { Client kubernetes.Interface } -func (csr *ClusterStreamingRequest) getClusterName() string { +func (csr *ClusterWriter) getClusterName() string { return csr.Cluster.Name } -func (csr *ClusterStreamingRequest) getClusterNamespace() string { +func (csr *ClusterWriter) getClusterNamespace() string { return csr.Cluster.Namespace } -func (csr *ClusterStreamingRequest) getLogOptions() *v1.PodLogOptions { +func (csr *ClusterWriter) getLogOptions(containerName string) *corev1.PodLogOptions { if csr.Options == nil { - csr.Options = &v1.PodLogOptions{} + return &corev1.PodLogOptions{ + Container: containerName, + Previous: csr.Previous, + } } - csr.Options.Previous = csr.Previous - return csr.Options + options := csr.Options.DeepCopy() + options.Container = containerName + options.Previous = csr.Previous + return options } -func (csr *ClusterStreamingRequest) getKubernetesClient() kubernetes.Interface { +func (csr *ClusterWriter) getKubernetesClient() kubernetes.Interface { if csr.Client != nil { return csr.Client } @@ -79,7 +89,7 @@ func (csr *ClusterStreamingRequest) getKubernetesClient() kubernetes.Interface { return csr.Client } -func (csr *ClusterStreamingRequest) getFollowWaitingTime() time.Duration { +func (csr *ClusterWriter) getFollowWaitingTime() time.Duration { if csr.FollowWaiting > 0 { return csr.FollowWaiting } @@ -131,6 +141,8 @@ func (as *activeSet) add(name string) { // has returns true if and only if name is active func (as *activeSet) has(name string) bool { + as.m.Lock() + defer as.m.Unlock() _, found := as.set[name] return found } @@ -145,6 +157,8 @@ func (as *activeSet) drop(name string) { // isZero checks if there are any active processes func (as *activeSet) isZero() bool { + as.m.Lock() + defer as.m.Unlock() return len(as.set) == 0 } @@ -154,7 +168,7 @@ func (as *activeSet) wait() { } // SingleStream streams the cluster's pod logs and shunts them to a single io.Writer -func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io.Writer) error { +func (csr *ClusterWriter) SingleStream(ctx context.Context, writer io.Writer) error { client := csr.getKubernetesClient() streamSet := newActiveSet() defer func() { @@ -165,7 +179,7 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. for { var ( - podList *v1.PodList + podList *corev1.PodList err error ) if isFirstScan || csr.Options.Follow { @@ -185,20 +199,36 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. 
return nil } + wrappedWriter := safeWriterFrom(writer) for _, pod := range podList.Items { - if streamSet.has(pod.Name) { - continue + for _, container := range pod.Status.ContainerStatuses { + if container.State.Running != nil { + streamName := fmt.Sprintf("%s-%s", pod.Name, container.Name) + if streamSet.has(streamName) { + continue + } + + streamSet.add(streamName) + go csr.streamInGoroutine( + ctx, + pod.Name, + container.Name, + client, + streamSet, + wrappedWriter, + ) + } } - - streamSet.add(pod.Name) - go csr.streamInGoroutine(ctx, pod.Name, client, streamSet, - safeWriterFrom(writer)) } if streamSet.isZero() { return nil } - // wait before looking for new pods to log - time.Sleep(csr.getFollowWaitingTime()) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(csr.getFollowWaitingTime()): + } } } @@ -207,21 +237,22 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. // // IMPORTANT: the output writer should be goroutine-safe // NOTE: the default Go `log` package is used for logging because it's goroutine-safe -func (csr *ClusterStreamingRequest) streamInGoroutine( +func (csr *ClusterWriter) streamInGoroutine( ctx context.Context, podName string, + containerName string, client kubernetes.Interface, streamSet *activeSet, output io.Writer, ) { defer func() { - streamSet.drop(podName) + streamSet.drop(fmt.Sprintf("%s-%s", podName, containerName)) }() pods := client.CoreV1().Pods(csr.getClusterNamespace()) logsRequest := pods.GetLogs( podName, - csr.getLogOptions()) + csr.getLogOptions(containerName)) logStream, err := logsRequest.Stream(ctx) if err != nil { @@ -237,9 +268,26 @@ func (csr *ClusterStreamingRequest) streamInGoroutine( } }() - _, err = io.Copy(output, logStream) - if err != nil { - log.Printf("error sending logs to writer, pod %s: %v", podName, err) - return + scanner := bufio.NewScanner(logStream) + scanner.Buffer(make([]byte, 0, 4096), 1024*1024) + bufferedOutput := bufio.NewWriter(output) + +readLoop: + for scanner.Scan() { + select { + case <-ctx.Done(): + break readLoop + default: + data := scanner.Text() + if _, err := bufferedOutput.Write([]byte(data)); err != nil { + log.Printf("error writing log line to output: %v", err) + } + if err := bufferedOutput.WriteByte('\n'); err != nil { + log.Printf("error writing newline to output: %v", err) + } + if err := bufferedOutput.Flush(); err != nil { + log.Printf("error flushing output: %v", err) + } + } } } diff --git a/pkg/podlogs/cluster_writer_test.go b/pkg/podlogs/cluster_writer_test.go new file mode 100644 index 0000000000..432b053882 --- /dev/null +++ b/pkg/podlogs/cluster_writer_test.go @@ -0,0 +1,192 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
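Two concurrency fixes stand out in the hunks above: has() and isZero() now take the activeSet mutex like every other accessor, and the bare time.Sleep between polling cycles became a select that also honours context cancellation. A condensed sketch of both patterns, stripped of the WaitGroup the real activeSet also carries:

package sketch

import (
	"context"
	"sync"
	"time"
)

// activeSet is a goroutine-safe string set: every accessor, including
// the read-only ones, must take the lock.
type activeSet struct {
	m   sync.Mutex
	set map[string]struct{}
}

func (as *activeSet) add(name string) {
	as.m.Lock()
	defer as.m.Unlock()
	if as.set == nil {
		as.set = make(map[string]struct{})
	}
	as.set[name] = struct{}{}
}

func (as *activeSet) has(name string) bool {
	as.m.Lock()
	defer as.m.Unlock()
	_, found := as.set[name]
	return found
}

// waitOrDone replaces a bare time.Sleep in a polling loop: it returns
// early with the context's error if cancellation happens first.
func waitOrDone(ctx context.Context, d time.Duration) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(d):
		return nil
	}
}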
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package podlogs + +import ( + "bytes" + "context" + "strings" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type syncBuffer struct { + b bytes.Buffer + m sync.Mutex +} + +func (b *syncBuffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Write(p) +} + +func (b *syncBuffer) String() string { + b.m.Lock() + defer b.m.Unlock() + return b.b.String() +} + +var _ = Describe("Cluster logging tests", func() { + clusterNamespace := "cluster-test" + clusterName := "myTestCluster" + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterNamespace, + Name: clusterName, + }, + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterNamespace, + Name: clusterName + "-1", + Labels: map[string]string{ + utils.ClusterLabelName: clusterName, + }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "postgresql", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + }, + }, + } + podWithSidecars := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterNamespace, + Name: clusterName + "-1", + Labels: map[string]string{ + utils.ClusterLabelName: clusterName, + }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "postgresql", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + { + Name: "sidecar", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + }, + }, + } + It("should exit on ended pod logs with the non-follow option", func(ctx context.Context) { + client := fake.NewClientset(pod) + var logBuffer bytes.Buffer + var wait sync.WaitGroup + wait.Add(1) + go func() { + defer GinkgoRecover() + defer wait.Done() + streamClusterLogs := ClusterWriter{ + Cluster: cluster, + Options: &corev1.PodLogOptions{ + Follow: false, + }, + Client: client, + } + err := streamClusterLogs.SingleStream(ctx, &logBuffer) + Expect(err).NotTo(HaveOccurred()) + }() + ctx.Done() + wait.Wait() + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) + }) + + It("should catch the logs of the sidecar too", func(ctx context.Context) { + client := fake.NewClientset(podWithSidecars) + var logBuffer bytes.Buffer + var wait sync.WaitGroup + wait.Add(1) + go func() { + defer GinkgoRecover() + defer wait.Done() + streamClusterLogs := ClusterWriter{ + Cluster: cluster, + Options: &corev1.PodLogOptions{ + Follow: false, + }, + Client: client, + } + err := streamClusterLogs.SingleStream(ctx, &logBuffer) + Expect(err).NotTo(HaveOccurred()) + }() + ctx.Done() + wait.Wait() + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\nfake logs\n")) + }) + + It("should catch extra logs if given the follow option", func(ctx context.Context) { + client := fake.NewClientset(pod) + + wg := sync.WaitGroup{} + wg.Add(1) + var logBuffer syncBuffer + + // let's set a short follow-wait, and keep the cluster streaming for two + // cycles + followWaiting := 150 * time.Millisecond + ctx2, cancel := context.WithTimeout(ctx, 300*time.Millisecond) + go func() { + // we always invoke done no matter what happens + defer wg.Done() + defer GinkgoRecover() + streamClusterLogs 
:= ClusterWriter{
+				Cluster: cluster,
+				Options: &corev1.PodLogOptions{
+					Follow: true,
+				},
+				FollowWaiting: followWaiting,
+				Client:        client,
+			}
+			err := streamClusterLogs.SingleStream(ctx2, &logBuffer)
+			// we cannot reliably know if we will close the function before the context
+			// deadline, so we accept both nil and context.DeadlineExceeded
+			Expect(err).To(Or(BeNil(), Equal(context.DeadlineExceeded)))
+		}()
+
+		time.Sleep(350 * time.Millisecond)
+		cancel()
+		wg.Wait()
+
+		fakeLogCount := strings.Count(logBuffer.String(), "fake logs\n")
+		Expect(fakeLogCount).To(BeNumerically(">=", 2))
+	})
+})
diff --git a/pkg/utils/logs/suite_test.go b/pkg/podlogs/suite_test.go
similarity index 81%
rename from pkg/utils/logs/suite_test.go
rename to pkg/podlogs/suite_test.go
index cfa5b0aeae..60466821b5 100644
--- a/pkg/utils/logs/suite_test.go
+++ b/pkg/podlogs/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,9 +13,11 @@
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package logs
+package podlogs
 
 import (
 	"testing"
diff --git a/pkg/podlogs/writer.go b/pkg/podlogs/writer.go
new file mode 100644
index 0000000000..21bb572ebd
--- /dev/null
+++ b/pkg/podlogs/writer.go
@@ -0,0 +1,135 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package podlogs contains code to fetch logs from Kubernetes pods
+package podlogs
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+// Writer represents a request to stream a pod's logs and send them to an io.Writer
+type Writer struct {
+	Pod    corev1.Pod
+	Client kubernetes.Interface
+}
+
+// NewPodLogsWriter initializes the struct
+func NewPodLogsWriter(pod corev1.Pod, cli kubernetes.Interface) *Writer {
+	return &Writer{Pod: pod, Client: cli}
+}
+
+// Single streams the pod logs and shunts them to the `writer`.
+// If there are multiple containers, it will concatenate all the container streams into the writer
+func (spl *Writer) Single(ctx context.Context, writer io.Writer, opts *corev1.PodLogOptions) (err error) {
+	if opts.Container != "" {
+		return spl.sendLogsToWriter(ctx, writer, opts)
+	}
+
+	for _, container := range spl.Pod.Spec.Containers {
+		containerOpts := opts.DeepCopy()
+		containerOpts.Container = container.Name
+		if err := spl.sendLogsToWriter(ctx, writer, containerOpts); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// writerConstructor is the interface representing an object that can spawn writers
+type writerConstructor interface {
+	Create(name string) (io.Writer, error)
+}
+
+func (spl *Writer) sendLogsToWriter(
+	ctx context.Context,
+	writer io.Writer,
+	options *corev1.PodLogOptions,
+) error {
+	request := spl.Client.CoreV1().Pods(spl.Pod.Namespace).GetLogs(spl.Pod.Name, options)
+
+	if options.Previous {
+		jsWriter := json.NewEncoder(writer)
+		if err := jsWriter.Encode("====== Beginning of Previous Log ====="); err != nil {
+			return err
+		}
+		// getting the Previous logs can fail (as with `kubectl logs -p`). Don't error out
+		if err := executeGetLogRequest(ctx, request, writer); err != nil {
+			// we try to print the json-safe error message. We don't exit on error
+			_ = json.NewEncoder(writer).Encode("Error fetching previous logs: " + err.Error())
+		}
+		if err := jsWriter.Encode("====== End of Previous Log ====="); err != nil {
+			return err
+		}
+	}
+	return executeGetLogRequest(ctx, request, writer)
+}
+
+// Multiple streams the pod logs, sending each container's stream to a separate writer
+func (spl *Writer) Multiple(
+	ctx context.Context,
+	opts *corev1.PodLogOptions,
+	writerConstructor writerConstructor,
+	filePathGenerator func(string) string,
+) error {
+	if opts.Container != "" {
+		return fmt.Errorf("use Single method to handle a single container output")
+	}
+
+	for _, container := range spl.Pod.Spec.Containers {
+		writer, err := writerConstructor.Create(filePathGenerator(container.Name))
+		if err != nil {
+			return err
+		}
+		containerOpts := opts.DeepCopy()
+		containerOpts.Container = container.Name
+
+		if err := spl.sendLogsToWriter(ctx, writer, containerOpts); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func executeGetLogRequest(ctx context.Context, logRequest *rest.Request, writer io.Writer) (err error) {
+	logStream, err := logRequest.Stream(ctx)
+	if err != nil {
+		return fmt.Errorf("when opening the log stream: %w", err)
+	}
+	defer func() {
+		innerErr := logStream.Close()
+		if err == nil && innerErr != nil {
+			err = fmt.Errorf("when closing the log stream: %w", innerErr)
+		}
+	}()
+
+	_, err = io.Copy(writer, logStream)
+	if err != nil {
+		return fmt.Errorf("when copying the log stream to the writer: %w", err)
+	}
+	_, _ = writer.Write([]byte("\n"))
+	return nil
+}
diff --git a/pkg/podlogs/writer_test.go b/pkg/podlogs/writer_test.go
new file mode 100644
index 0000000000..fa5d3d73d9
--- /dev/null
+++ b/pkg/podlogs/writer_test.go
@@ -0,0 +1,254 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
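A hedged end-to-end sketch of how the new podlogs.Writer might be driven from a standard client-go clientset; the kubeconfig plumbing, namespace, and pod name below are illustrative only:

package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/podlogs"
)

func main() {
	ctx := context.Background()

	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Fetch the pod so Writer can iterate over its containers.
	pod, err := client.CoreV1().Pods("default").Get(ctx, "cluster-example-1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// Concatenate every container's log onto stdout.
	writer := podlogs.NewPodLogsWriter(*pod, client)
	if err := writer.Single(ctx, os.Stdout, &corev1.PodLogOptions{}); err != nil {
		panic(err)
	}
}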
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package podlogs + +import ( + "bytes" + "context" + "fmt" + "io" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type multiWriter struct { + writers map[string]*bytes.Buffer +} + +func newMultiWriter() *multiWriter { + newMw := &multiWriter{ + writers: make(map[string]*bytes.Buffer), + } + return newMw +} + +func (mw *multiWriter) Create(name string) (io.Writer, error) { + var buffer bytes.Buffer + mw.writers[name] = &buffer + return &buffer, nil +} + +var _ = Describe("Pod logging tests", func() { + podNamespace := "pod-test" + podName := "pod-name-test" + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: podNamespace, + Name: podName, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "postgres", + }, + }, + }, + } + + podWithSidecar := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: podNamespace, + Name: podName, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "postgres", + }, + { + Name: "sidecar", + }, + }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "postgres", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: time.Now()}, + }, + }, + }, + { + Name: "sidecar", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: time.Now()}, + }, + }, + }, + }, + }, + } + + When("using the Stream function", func() { + It("should return the proper podName", func() { + streamPodLog := Writer{ + Pod: pod, + } + Expect(streamPodLog.Pod.Name).To(BeEquivalentTo(podName)) + Expect(streamPodLog.Pod.Namespace).To(BeEquivalentTo(podNamespace)) + }) + + It("should be able to handle the empty Pod", func(ctx context.Context) { + client := fake.NewClientset() + streamPodLog := Writer{ + Pod: corev1.Pod{}, + Client: client, + } + var logBuffer bytes.Buffer + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(logBuffer.String()).To(BeEquivalentTo("")) + }) + + It("should read the logs of a pod with one container", func(ctx context.Context) { + client := fake.NewClientset(&pod) + streamPodLog := Writer{ + Pod: pod, + Client: client, + } + + var logBuffer bytes.Buffer + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) + }) + + It("should read the logs of a pod with multiple containers", func(ctx context.Context) { + client := fake.NewClientset(&podWithSidecar) + streamPodLog := Writer{ + Pod: podWithSidecar, + Client: client, + } + + var logBuffer bytes.Buffer + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\nfake logs\n")) + }) + + It("should read only the specified container logs in a pod with multiple 
containers", func(ctx context.Context) { + client := fake.NewClientset(&podWithSidecar) + streamPodLog := Writer{ + Pod: podWithSidecar, + Client: client, + } + + var logBuffer bytes.Buffer + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{ + Container: "postgres", + Previous: false, + }) + Expect(err).ToNot(HaveOccurred()) + + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) + }) + + It("can follow pod logs", func(ctx SpecContext) { + client := fake.NewClientset(&pod) + var logBuffer bytes.Buffer + var wait sync.WaitGroup + wait.Add(1) + go func() { + defer GinkgoRecover() + defer wait.Done() + now := metav1.Now() + streamPodLog := Writer{ + Pod: pod, + Client: client, + } + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{ + Timestamps: false, + Follow: true, + SinceTime: &now, + }) + Expect(err).NotTo(HaveOccurred()) + }() + // Calling ctx.Done is not strictly necessary because the fake Client + // will terminate the pod stream anyway, ending Stream. + // But in "production", Stream will follow + // the pod logs until the context, or the logs, are over + ctx.Done() + wait.Wait() + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) + }) + }) + When("using the StreamMultiple function", func() { + It("should log each container into a separate writer", func(ctx context.Context) { + client := fake.NewClientset(&podWithSidecar) + streamPodLog := Writer{ + Pod: podWithSidecar, + Client: client, + } + + namer := func(container string) string { + return fmt.Sprintf("%s-%s.log", streamPodLog.Pod.Name, container) + } + mw := newMultiWriter() + err := streamPodLog.Multiple(ctx, &corev1.PodLogOptions{}, mw, namer) + Expect(err).ToNot(HaveOccurred()) + Expect(mw.writers).To(HaveLen(2)) + + Expect(mw.writers["pod-name-test-postgres.log"].String()).To(BeEquivalentTo("fake logs\n")) + Expect(mw.writers["pod-name-test-sidecar.log"].String()).To(BeEquivalentTo("fake logs\n")) + }) + + It("can fetch the previous logs for each container", func(ctx context.Context) { + client := fake.NewClientset(&podWithSidecar) + streamPodLog := Writer{ + Pod: podWithSidecar, + Client: client, + } + + namer := func(container string) string { + return fmt.Sprintf("%s-%s.log", streamPodLog.Pod.Name, container) + } + mw := newMultiWriter() + err := streamPodLog.Multiple(ctx, &corev1.PodLogOptions{Previous: true}, mw, namer) + Expect(err).ToNot(HaveOccurred()) + Expect(mw.writers).To(HaveLen(2)) + + Expect(mw.writers["pod-name-test-postgres.log"].String()).To(BeEquivalentTo( + `"====== Beginning of Previous Log =====" +fake logs +"====== End of Previous Log =====" +fake logs +`)) + + Expect(mw.writers["pod-name-test-sidecar.log"].String()).To(BeEquivalentTo( + `"====== Beginning of Previous Log =====" +fake logs +"====== End of Previous Log =====" +fake logs +`)) + }) + }) +}) diff --git a/pkg/podspec/builder.go b/pkg/podspec/builder.go index e7d0be01b0..b38ed71d20 100644 --- a/pkg/podspec/builder.go +++ b/pkg/podspec/builder.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package podspec contains various utilities to deal with Pod Specs @@ -383,6 +386,28 @@ func (builder *Builder) WithInitContainerSecurityContext( return builder } +// WithInitContainerResources ensures that, if in the current status there is +// an init container with the passed name and the resources are empty, the resources will be +// set to the ones passed. +// If `overwrite` is true the resources are overwritten even when they're not empty +func (builder *Builder) WithInitContainerResources( + name string, + resources corev1.ResourceRequirements, + overwrite bool, +) *Builder { + builder.WithInitContainer(name) + + for idx, value := range builder.status.Spec.InitContainers { + if value.Name == name { + if overwrite || value.Resources.Limits == nil && value.Resources.Requests == nil { + builder.status.Spec.InitContainers[idx].Resources = resources + } + } + } + + return builder +} + // Build gets the final Pod template func (builder *Builder) Build() *apiv1.PodTemplateSpec { return &builder.status diff --git a/pkg/podspec/builder_test.go b/pkg/podspec/builder_test.go index 28e8027c4b..fdc5313440 100644 --- a/pkg/podspec/builder_test.go +++ b/pkg/podspec/builder_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package podspec diff --git a/pkg/podspec/suite_test.go b/pkg/podspec/suite_test.go index 112a5c2938..c2b2a4e412 100644 --- a/pkg/podspec/suite_test.go +++ b/pkg/podspec/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package podspec diff --git a/pkg/postgres/booleans.go b/pkg/postgres/booleans.go index 365bff9a9a..8a06a01a5f 100644 --- a/pkg/postgres/booleans.go +++ b/pkg/postgres/booleans.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
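The overwrite guard in WithInitContainerResources above is easy to misread because && binds tighter than ||: the resources are applied when overwrite is set, or when both Limits and Requests are unset. A standalone restatement of that rule; applyResources is an illustrative helper, not part of the builder:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// applyResources mirrors the builder's guard: only touch the container
// when its resources are empty, unless overwrite is requested.
func applyResources(c *corev1.Container, r corev1.ResourceRequirements, overwrite bool) {
	if overwrite || (c.Resources.Limits == nil && c.Resources.Requests == nil) {
		c.Resources = r
	}
}

func example() corev1.Container {
	container := corev1.Container{Name: "bootstrap-controller"}
	defaults := corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("100m"),
			corev1.ResourceMemory: resource.MustParse("64Mi"),
		},
	}
	applyResources(&container, defaults, false)                      // applied: resources were empty
	applyResources(&container, corev1.ResourceRequirements{}, false) // no-op: already set
	return container
}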
+ +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/booleans_test.go b/pkg/postgres/booleans_test.go index d4733355f7..a568c55166 100644 --- a/pkg/postgres/booleans_test.go +++ b/pkg/postgres/booleans_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 6423097d33..84ad8c4676 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -19,12 +22,18 @@ package postgres import ( "bytes" "crypto/sha256" + "encoding/json" "fmt" + "iter" "math" + "path/filepath" + "slices" "sort" "strings" "text/template" "time" + + "github.com/cloudnative-pg/machinery/pkg/log" ) // WalLevelValue a value that is assigned to the 'wal_level' configuration field @@ -43,8 +52,14 @@ const ( // ParameterWalLogHints the configuration key containing the wal_log_hints value ParameterWalLogHints = "wal_log_hints" - // ParameterRecoveyMinApplyDelay is the configuration key containing the recovery_min_apply_delay parameter - ParameterRecoveyMinApplyDelay = "recovery_min_apply_delay" + // ParameterRecoveryMinApplyDelay is the configuration key containing the recovery_min_apply_delay parameter + ParameterRecoveryMinApplyDelay = "recovery_min_apply_delay" + + // ParameterSyncReplicationSlots the configuration key containing the sync_replication_slots value + ParameterSyncReplicationSlots = "sync_replication_slots" + + // ParameterHotStandbyFeedback the configuration key containing the hot_standby_feedback value + ParameterHotStandbyFeedback = "hot_standby_feedback" ) // An acceptable wal_level value @@ -87,9 +102,9 @@ const ( local all all peer map=local # Require client certificate authentication for the streaming_replica user -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert -hostssl all cnpg_pooler_pgbouncer all cert +hostssl postgres streaming_replica all cert map=cnpg_streaming_replica +hostssl replication streaming_replica all cert map=cnpg_streaming_replica +hostssl all cnpg_pooler_pgbouncer all cert map=cnpg_pooler_pgbouncer # # USER-DEFINED RULES @@ -122,6 +137,12 @@ host all all all {{.DefaultAuthenticationMethod}} # Grant local access ('local' user map) local {{.Username}} postgres +# Grant streaming_replica access ('cnpg_streaming_replica' user map) +cnpg_streaming_replica 
streaming_replica streaming_replica + +# Grant cnpg_pooler_pgbouncer access ('cnpg_pooler_pgbouncer' user map) +cnpg_pooler_pgbouncer cnpg_pooler_pgbouncer cnpg_pooler_pgbouncer + # # USER-DEFINED RULES # @@ -144,6 +165,10 @@ local {{.Username}} postgres // ScratchDataDirectory is the directory to be used for scratch data ScratchDataDirectory = "/controller" + // TemporaryDirectory is the directory that is used to create + // temporary files, and configured as TMPDIR in PostgreSQL Pods + TemporaryDirectory = "/controller/tmp" + // SpoolDirectory is the directory where we spool the WAL files that // were pre-archived in parallel SpoolDirectory = ScratchDataDirectory + "/wal-archive-spool" @@ -227,11 +252,26 @@ local {{.Username}} postgres // config in the custom.conf file CNPGConfigSha256 = "cnpg.config_sha256" + // CNPGSynchronousStandbyNamesMetadata is used to inject inside PG the parameters + // that were used to calculate synchronous_standby_names. With this data we're + // able to know the actual settings without parsing back the + // synchronous_standby_names GUC + CNPGSynchronousStandbyNamesMetadata = "cnpg.synchronous_standby_names_metadata" + // SharedPreloadLibraries shared preload libraries key in the config SharedPreloadLibraries = "shared_preload_libraries" // SynchronousStandbyNames is the postgresql parameter key for synchronous standbys SynchronousStandbyNames = "synchronous_standby_names" + + // ExtensionControlPath is the postgresql parameter key for extension_control_path + ExtensionControlPath = "extension_control_path" + + // DynamicLibraryPath is the postgresql parameter key dynamic_library_path + DynamicLibraryPath = "dynamic_library_path" + + // ExtensionsBaseDirectory is the base directory to store ImageVolume Extensions + ExtensionsBaseDirectory = "/extensions" ) // hbaTemplate is the template used to create the HBA configuration @@ -240,15 +280,12 @@ var hbaTemplate = template.Must(template.New("pg_hba.conf").Parse(hbaTemplateStr // identTemplate is the template used to create the HBA configuration var identTemplate = template.Must(template.New("pg_ident.conf").Parse(identTemplateString)) -// MajorVersionRangeUnlimited is used to represent an unbound limit in a MajorVersionRange -const MajorVersionRangeUnlimited = 0 - -// MajorVersionRange is used to represent a range of PostgreSQL versions -type MajorVersionRange = struct { - // The minimum limit of PostgreSQL major version, extreme included +// MajorVersionRange represents a range of PostgreSQL major versions. +type MajorVersionRange struct { + // Min is the inclusive lower bound of the PostgreSQL major version range. Min int - // The maximum limit of PostgreSQL version, extreme excluded, or MajorVersionRangeUnlimited + // Max is the exclusive upper bound of the PostgreSQL major version range. Max int } @@ -275,6 +312,21 @@ type ConfigurationSettings struct { PgAuditSettings SettingsCollection } +// SynchronousStandbyNamesConfig is the parameters that are needed +// to create the synchronous_standby_names GUC +type SynchronousStandbyNamesConfig struct { + // Method accepts 'any' (quorum-based synchronous replication) + // or 'first' (priority-based synchronous replication) as values. 
+ Method string `json:"method"` + + // NumSync is the number of synchronous standbys that transactions + // need to wait for replies from + NumSync int `json:"number"` + + // StandbyNames is the list of standby servers + StandbyNames []string `json:"standbyNames"` +} + // ConfigurationInfo contains the required information to create a PostgreSQL // configuration type ConfigurationInfo struct { @@ -284,14 +336,17 @@ type ConfigurationInfo struct { // The database settings to be used Settings ConfigurationSettings - // The major version + // The PostgreSQL version MajorVersion int // The list of user-level settings UserSettings map[string]string // The synchronous_standby_names configuration to be applied - SynchronousStandbyNames string + SynchronousStandbyNames SynchronousStandbyNamesConfig + + // The synchronized_standby_slots configuration to be applied + SynchronizedStandbySlots []string // List of additional sharedPreloadLibraries to be loaded AdditionalSharedPreloadLibraries []string @@ -324,6 +379,9 @@ type ConfigurationInfo struct { // Minimum apply delay of transaction RecoveryMinApplyDelay time.Duration + + // The list of additional extensions to be loaded into the PostgreSQL configuration + AdditionalExtensions []AdditionalExtensionConfiguration } // getAlterSystemEnabledValue returns a config compatible value for IsAlterSystemEnabled @@ -350,8 +408,8 @@ type ManagedExtension struct { SkipCreateExtension bool } -// IsUsed checks whether a configuration namespace in the namespaces list -// is used in the user provided configuration +// IsUsed checks whether a configuration namespace in the extension namespaces list +// is used in the user-provided configuration func (e ManagedExtension) IsUsed(userConfigs map[string]string) bool { for k := range userConfigs { for _, namespace := range e.Namespaces { @@ -363,6 +421,23 @@ func (e ManagedExtension) IsUsed(userConfigs map[string]string) bool { return false } +// IsManagedExtensionUsed checks whether a configuration namespace in the named extension namespaces list +// is used in the user-provided configuration +func IsManagedExtensionUsed(name string, userConfigs map[string]string) bool { + var extension *ManagedExtension + for _, ext := range ManagedExtensions { + if ext.Name == name { + extension = &ext + break + } + } + if extension == nil { + return false + } + + return extension.IsUsed(userConfigs) +} + var ( // ManagedExtensions contains the list of extensions the operator supports to manage ManagedExtensions = []ManagedExtension{ @@ -431,7 +506,6 @@ var ( // The following parameters need a reload to be applied "archive_cleanup_command": blockedConfigurationParameter, "archive_command": fixedConfigurationParameter, - "full_page_writes": fixedConfigurationParameter, "log_destination": blockedConfigurationParameter, "log_directory": blockedConfigurationParameter, "log_file_mode": blockedConfigurationParameter, @@ -440,6 +514,7 @@ var ( "log_rotation_size": blockedConfigurationParameter, "log_truncate_on_rotation": blockedConfigurationParameter, "pg_failover_slots.primary_dsn": fixedConfigurationParameter, + "pg_failover_slots.standby_slot_names": fixedConfigurationParameter, "promote_trigger_file": blockedConfigurationParameter, "recovery_end_command": blockedConfigurationParameter, "recovery_min_apply_delay": blockedConfigurationParameter, @@ -456,6 +531,7 @@ var ( "ssl_prefer_server_ciphers": fixedConfigurationParameter, "stats_temp_directory": blockedConfigurationParameter, "synchronous_standby_names": 
fixedConfigurationParameter, + "synchronized_standby_slots": fixedConfigurationParameter, "syslog_facility": blockedConfigurationParameter, "syslog_ident": blockedConfigurationParameter, "syslog_sequence_numbers": blockedConfigurationParameter, @@ -467,64 +543,52 @@ var ( CnpgConfigurationSettings = ConfigurationSettings{ GlobalDefaultSettings: SettingsCollection{ "archive_timeout": "5min", - "max_parallel_workers": "32", - "max_worker_processes": "32", - "max_replication_slots": "32", + "dynamic_shared_memory_type": "posix", + "full_page_writes": "on", "logging_collector": "on", "log_destination": "csvlog", + "log_directory": LogPath, + "log_filename": LogFileName, "log_rotation_age": "0", "log_rotation_size": "0", "log_truncate_on_rotation": "false", - "log_directory": LogPath, - "log_filename": LogFileName, - "dynamic_shared_memory_type": "posix", - "wal_sender_timeout": "5s", - "wal_receiver_timeout": "5s", + "max_parallel_workers": "32", + "max_worker_processes": "32", + "max_replication_slots": "32", + "shared_memory_type": "mmap", + "ssl_max_protocol_version": "TLSv1.3", + "ssl_min_protocol_version": "TLSv1.3", + "wal_keep_size": "512MB", "wal_level": "logical", ParameterWalLogHints: "on", + "wal_sender_timeout": "5s", + "wal_receiver_timeout": "5s", // Workaround for PostgreSQL not behaving correctly when // a default value is not explicit in the postgresql.conf and // the parameter cannot be changed without a restart. SharedPreloadLibraries: "", }, - DefaultSettings: map[MajorVersionRange]SettingsCollection{ - {MajorVersionRangeUnlimited, 120000}: { - "wal_keep_segments": "32", - }, - {120000, 130000}: { - "wal_keep_segments": "32", - "shared_memory_type": "mmap", - }, - {130000, MajorVersionRangeUnlimited}: { - "wal_keep_size": "512MB", - "shared_memory_type": "mmap", - }, - {120000, MajorVersionRangeUnlimited}: { - "ssl_max_protocol_version": "TLSv1.3", - "ssl_min_protocol_version": "TLSv1.3", - }, - }, MandatorySettings: SettingsCollection{ - "listen_addresses": "*", - "unix_socket_directories": SocketDirectory, - "hot_standby": "true", "archive_command": fmt.Sprintf( "/controller/manager wal-archive --log-destination %s/%s.json %%p", LogPath, LogFileName), - "port": fmt.Sprint(ServerPort), - "full_page_writes": "on", - "ssl": "on", - "ssl_cert_file": ServerCertificateLocation, - "ssl_key_file": ServerKeyLocation, - "ssl_ca_file": ClientCACertificateLocation, - "restart_after_crash": "false", + "hot_standby": "true", + "listen_addresses": "*", + "port": fmt.Sprint(ServerPort), + "restart_after_crash": "false", + "ssl": "on", + "ssl_cert_file": ServerCertificateLocation, + "ssl_key_file": ServerKeyLocation, + "ssl_ca_file": ClientCACertificateLocation, + "unix_socket_directories": SocketDirectory, }, } ) // CreateHBARules will create the content of pg_hba.conf file given // the rules set by the cluster spec -func CreateHBARules(hba []string, +func CreateHBARules( + hba []string, defaultAuthenticationMethod, ldapConfigString string, ) (string, error) { var hbaContent bytes.Buffer @@ -576,6 +640,11 @@ func (p *PgConfiguration) GetConfigurationParameters() map[string]string { return p.configs } +// SetConfigurationParameters sets the configuration parameters +func (p *PgConfiguration) SetConfigurationParameters(configs map[string]string) { + p.configs = configs +} + // OverwriteConfig overwrites a configuration in the map, given the key/value pair. 
// If the map is nil, it is created and the pair is added func (p *PgConfiguration) OverwriteConfig(key, value string) { @@ -628,7 +697,7 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { ignoreFixedSettingsFromUser := info.IncludingMandatory || !info.PreserveFixedSettingsFromUser // Set all the default settings - setDefaultConfigurations(info, configuration) + configuration.setDefaultConfigurations(info) // Apply all the values from the user, overriding defaults, // ignoring those which are fixed if ignoreFixedSettingsFromUser is true @@ -646,9 +715,7 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { configuration.OverwriteConfig(key, value) } - // IMPORTANT: yes, this field is called MajorVersion but actually - // it's just the PostgreSQL version number - if info.MajorVersion >= 170000 { + if info.MajorVersion >= 17 { configuration.OverwriteConfig("allow_alter_system", info.getAlterSystemEnabledValue()) } } @@ -666,9 +733,30 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { } // Apply the synchronous replication settings - syncStandbyNames := info.SynchronousStandbyNames + syncStandbyNames := info.SynchronousStandbyNames.String() if len(syncStandbyNames) > 0 { configuration.OverwriteConfig(SynchronousStandbyNames, syncStandbyNames) + + if metadata, err := json.Marshal(info.SynchronousStandbyNames); err != nil { + log.Error(err, + "Error while serializing streaming configuration parameters", + "synchronousStandbyNames", info.SynchronousStandbyNames) + } else { + configuration.OverwriteConfig(CNPGSynchronousStandbyNamesMetadata, string(metadata)) + } + } + + if len(info.SynchronizedStandbySlots) > 0 { + synchronizedStandbySlots := strings.Join(info.SynchronizedStandbySlots, ",") + if IsManagedExtensionUsed("pg_failover_slots", info.UserSettings) { + configuration.OverwriteConfig("pg_failover_slots.standby_slot_names", synchronizedStandbySlots) + } + + if info.MajorVersion >= 17 { + if isEnabled, _ := ParsePostgresConfigBoolean(info.UserSettings["sync_replication_slots"]); isEnabled { + configuration.OverwriteConfig("synchronized_standby_slots", synchronizedStandbySlots) + } + } } if info.ClusterName != "" { @@ -687,16 +775,16 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { // primary and on the replicas, setting it on both // is a safe approach. 
configuration.OverwriteConfig( - ParameterRecoveyMinApplyDelay, + ParameterRecoveryMinApplyDelay, fmt.Sprintf("%vs", math.Floor(info.RecoveryMinApplyDelay.Seconds()))) } if info.IncludingSharedPreloadLibraries { // Set all managed shared preload libraries - setManagedSharedPreloadLibraries(info, configuration) + configuration.setManagedSharedPreloadLibraries(info) // Set all user provided shared preload libraries - setUserSharedPreloadLibraries(info, configuration) + configuration.setUserSharedPreloadLibraries(info) } // Apply the list of temporary tablespaces @@ -704,35 +792,39 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { configuration.OverwriteConfig("temp_tablespaces", strings.Join(info.TemporaryTablespaces, ",")) } + // Setup additional extensions + if len(info.AdditionalExtensions) > 0 { + configuration.setExtensionControlPath(info) + configuration.setDynamicLibraryPath(info) + } + return configuration } // setDefaultConfigurations sets all default configurations into the configuration map // from the provided info -func setDefaultConfigurations(info ConfigurationInfo, configuration *PgConfiguration) { +func (p *PgConfiguration) setDefaultConfigurations(info ConfigurationInfo) { // start from the global default settings for key, value := range info.Settings.GlobalDefaultSettings { - configuration.OverwriteConfig(key, value) + p.OverwriteConfig(key, value) } // apply settings relative to a certain PostgreSQL version for constraints, settings := range info.Settings.DefaultSettings { - if constraints.Min == MajorVersionRangeUnlimited || (constraints.Min <= info.MajorVersion) { - if constraints.Max == MajorVersionRangeUnlimited || (info.MajorVersion < constraints.Max) { - for key, value := range settings { - configuration.OverwriteConfig(key, value) - } + if constraints.Min <= info.MajorVersion && info.MajorVersion < constraints.Max { + for key, value := range settings { + p.OverwriteConfig(key, value) } } } } // setManagedSharedPreloadLibraries sets all additional preloaded libraries -func setManagedSharedPreloadLibraries(info ConfigurationInfo, configuration *PgConfiguration) { +func (p *PgConfiguration) setManagedSharedPreloadLibraries(info ConfigurationInfo) { for _, extension := range ManagedExtensions { if extension.IsUsed(info.UserSettings) { for _, library := range extension.SharedPreloadLibraries { - configuration.AddSharedPreloadLibrary(library) + p.AddSharedPreloadLibrary(library) } } } @@ -742,8 +834,8 @@ func setManagedSharedPreloadLibraries(info ConfigurationInfo, configuration *PgC // The resulting list will have all the user provided libraries, followed by all the ones managed // by the operator, removing any duplicate and keeping the first occurrence in case of duplicates. // Therefore the user provided order is preserved, if an overlap (with the ones already present) happens -func setUserSharedPreloadLibraries(info ConfigurationInfo, configuration *PgConfiguration) { - oldLibraries := strings.Split(configuration.GetConfig(SharedPreloadLibraries), ",") +func (p *PgConfiguration) setUserSharedPreloadLibraries(info ConfigurationInfo) { + oldLibraries := strings.Split(p.GetConfig(SharedPreloadLibraries), ",") dedupedLibraries := make(map[string]bool, len(oldLibraries)+len(info.AdditionalSharedPreloadLibraries)) var libraries []string for _, library := range append(info.AdditionalSharedPreloadLibraries, oldLibraries...) 
{ @@ -757,7 +849,7 @@ func setUserSharedPreloadLibraries(info ConfigurationInfo, configuration *PgConf } } if len(libraries) > 0 { - configuration.OverwriteConfig(SharedPreloadLibraries, strings.Join(libraries, ",")) + p.OverwriteConfig(SharedPreloadLibraries, strings.Join(libraries, ",")) } } @@ -788,3 +880,127 @@ func CreatePostgresqlConfFile(configuration *PgConfiguration) (string, string) { func escapePostgresConfValue(value string) string { return fmt.Sprintf("'%v'", strings.ReplaceAll(value, "'", "''")) } + +// AdditionalExtensionConfiguration is the configuration for an Extension added via ImageVolume +type AdditionalExtensionConfiguration struct { + // The name of the Extension + Name string + + // The list of directories that should be added to ExtensionControlPath. + ExtensionControlPath []string + + // The list of directories that should be added to DynamicLibraryPath. + DynamicLibraryPath []string +} + +// absolutizePaths returns an iterator over the passed paths, absolutized +// using the name of the extension +func (ext *AdditionalExtensionConfiguration) absolutizePaths(paths []string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, path := range paths { + if !yield(filepath.Join(ExtensionsBaseDirectory, ext.Name, path)) { + break + } + } + } +} + +// getRuntimeExtensionControlPath collects the absolute directories to be put +// into the `extension_control_path` GUC to support this additional extension +func (ext *AdditionalExtensionConfiguration) getRuntimeExtensionControlPath() iter.Seq[string] { + paths := []string{"share"} + if len(ext.ExtensionControlPath) > 0 { + paths = ext.ExtensionControlPath + } + + return ext.absolutizePaths(paths) +} + +// getDynamicLibraryPath collects the absolute directories to be put +// into the `dynamic_library_path` GUC to support this additional extension +func (ext *AdditionalExtensionConfiguration) getDynamicLibraryPath() iter.Seq[string] { + paths := []string{"lib"} + if len(ext.DynamicLibraryPath) > 0 { + paths = ext.DynamicLibraryPath + } + + return ext.absolutizePaths(paths) +} + +// setExtensionControlPath manages the `extension_control_path` GUC, merging +// the paths defined by the user with the ones provided by the +// `.spec.postgresql.extensions` stanza +func (p *PgConfiguration) setExtensionControlPath(info ConfigurationInfo) { + extensionControlPath := []string{"$system"} + + for _, extension := range info.AdditionalExtensions { + extensionControlPath = slices.AppendSeq( + extensionControlPath, + extension.getRuntimeExtensionControlPath(), + ) + } + + extensionControlPath = slices.AppendSeq( + extensionControlPath, + strings.SplitSeq(p.GetConfig(ExtensionControlPath), ":"), + ) + + extensionControlPath = slices.DeleteFunc( + extensionControlPath, + func(s string) bool { return s == "" }, + ) + + p.OverwriteConfig(ExtensionControlPath, strings.Join(extensionControlPath, ":")) +} + +// setDynamicLibraryPath manages the `dynamic_library_path` GUC, merging the +// paths defined by the user with the ones provided by the +// `.spec.postgresql.extensions` stanza +func (p *PgConfiguration) setDynamicLibraryPath(info ConfigurationInfo) { + dynamicLibraryPath := []string{"$libdir"} + + for _, extension := range info.AdditionalExtensions { + dynamicLibraryPath = slices.AppendSeq( + dynamicLibraryPath, + extension.getDynamicLibraryPath()) + } + + dynamicLibraryPath = slices.AppendSeq( + dynamicLibraryPath, + strings.SplitSeq(p.GetConfig(DynamicLibraryPath), ":")) + + dynamicLibraryPath = slices.DeleteFunc( + 
dynamicLibraryPath, + func(s string) bool { return s == "" }, + ) + + p.OverwriteConfig(DynamicLibraryPath, strings.Join(dynamicLibraryPath, ":")) +} + +// String creates the synchronous_standby_names PostgreSQL GUC +// with the passed members +func (s *SynchronousStandbyNamesConfig) String() string { + if s.IsZero() { + return "" + } + + escapePostgresConfLiteral := func(value string) string { + return fmt.Sprintf("\"%v\"", strings.ReplaceAll(value, "\"", "\"\"")) + } + + escapedReplicas := make([]string, len(s.StandbyNames)) + for idx, name := range s.StandbyNames { + escapedReplicas[idx] = escapePostgresConfLiteral(name) + } + + return fmt.Sprintf( + "%s %v (%v)", + s.Method, + s.NumSync, + strings.Join(escapedReplicas, ",")) +} + +// IsZero is true when synchronous replication is disabled +func (s SynchronousStandbyNamesConfig) IsZero() bool { + return len(s.StandbyNames) == 0 +} diff --git a/pkg/postgres/configuration_test.go b/pkg/postgres/configuration_test.go index 986d3ccf2b..e6366d292e 100644 --- a/pkg/postgres/configuration_test.go +++ b/pkg/postgres/configuration_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres @@ -32,7 +35,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("apply the default settings", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + MajorVersion: 17, UserSettings: settings, IncludingMandatory: true, } @@ -44,7 +47,7 @@ It("enforce the mandatory values", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + MajorVersion: 17, UserSettings: map[string]string{ "hot_standby": "off", }, @@ -57,7 +60,7 @@ It("generate a config file", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + MajorVersion: 17, UserSettings: settings, IncludingMandatory: true, } @@ -78,24 +81,11 @@ Expect(confFile).To(ContainSubstring("log_destination = 'stderr'\nshared_buffers = '128KB'\n")) }) - When("version is 10", func() { - It("will use appropriate settings", func() { - info := ConfigurationInfo{ - Settings: CnpgConfigurationSettings, - MajorVersion: 100000, - UserSettings: settings, - IncludingMandatory: true, - } - config := CreatePostgresqlConfiguration(info) - Expect(config.GetConfig("wal_keep_segments")).To(Equal("32")) - }) - }) - When("version is 13", func() { It("will use appropriate settings", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, UserSettings: settings, IncludingMandatory: true, } @@ -110,7 +100,7 @@ It("will set archive_mode to always", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, +
MajorVersion: 13, UserSettings: settings, IncludingMandatory: true, IsReplicaCluster: true, @@ -124,7 +114,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("will set archive_mode to on", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, UserSettings: settings, IncludingMandatory: true, IsReplicaCluster: false, @@ -137,7 +127,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("adds shared_preload_library correctly", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, AdditionalSharedPreloadLibraries: []string{"some_library", "another_library", ""}, @@ -152,7 +142,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("checks if PreserveFixedSettingsFromUser works properly", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + MajorVersion: 13, UserSettings: map[string]string{ "ssl": "off", "recovery_target_name": "test", @@ -195,7 +185,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("can properly set allow_alter_system to on", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: true, - MajorVersion: 170000, + MajorVersion: 17, IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -205,7 +195,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("can properly set allow_alter_system to off", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: false, - MajorVersion: 180000, + MajorVersion: 18, IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -217,7 +207,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("should not set allow_alter_system", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: false, - MajorVersion: 140000, + MajorVersion: 14, IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -228,7 +218,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("should not set allow_alter_system", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: true, - MajorVersion: 140000, + MajorVersion: 14, IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -307,7 +297,7 @@ var _ = Describe("pgaudit", func() { It("adds pgaudit to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, UserSettings: map[string]string{"pgaudit.something": "something"}, IncludingSharedPreloadLibraries: true, IncludingMandatory: true, @@ -324,7 +314,7 @@ var _ = Describe("pgaudit", func() { It("adds pg_stat_statements to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, UserSettings: map[string]string{"pg_stat_statements.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, @@ -341,7 +331,7 @@ var _ = Describe("pgaudit", func() { It("adds pg_stat_statements and pgaudit to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, UserSettings: map[string]string{ "pg_stat_statements.something": "something", "pgaudit.somethingelse": "somethingelse", @@ -361,7 +351,7 @@ var _ = Describe("pg_failover_slots", func() { It("adds pg_failover_slots to 
shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, @@ -378,26 +368,128 @@ var _ = Describe("recovery_min_apply_delay", func() { It("is not added when zero", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, RecoveryMinApplyDelay: 0, } config := CreatePostgresqlConfiguration(info) - Expect(config.GetConfig(ParameterRecoveyMinApplyDelay)).To(BeEmpty()) + Expect(config.GetConfig(ParameterRecoveryMinApplyDelay)).To(BeEmpty()) }) It("is added to the configuration when specified", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + MajorVersion: 13, UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, RecoveryMinApplyDelay: 1 * time.Hour, } config := CreatePostgresqlConfiguration(info) - Expect(config.GetConfig(ParameterRecoveyMinApplyDelay)).To(Equal("3600s")) + Expect(config.GetConfig(ParameterRecoveryMinApplyDelay)).To(Equal("3600s")) + }) +}) + +var _ = Describe("PostgreSQL Extensions", func() { + Context("configuring extension_control_path and dynamic_library_path", func() { + const ( + share1 = ExtensionsBaseDirectory + "/postgis/share" + share2 = ExtensionsBaseDirectory + "/pgvector/share" + lib1 = ExtensionsBaseDirectory + "/postgis/lib" + lib2 = ExtensionsBaseDirectory + "/pgvector/lib" + ) + sharePaths := strings.Join([]string{share1, share2}, ":") + libPaths := strings.Join([]string{lib1, lib2}, ":") + + It("both empty when there are no Extensions defined", func() { + info := ConfigurationInfo{ + Settings: CnpgConfigurationSettings, + MajorVersion: 18, + IncludingMandatory: true, + } + config := CreatePostgresqlConfiguration(info) + Expect(config.GetConfig(ExtensionControlPath)).To(BeEmpty()) + Expect(config.GetConfig(DynamicLibraryPath)).To(BeEmpty()) + }) + + It("configures them when an Extension is defined", func() { + info := ConfigurationInfo{ + Settings: CnpgConfigurationSettings, + MajorVersion: 18, + IncludingMandatory: true, + AdditionalExtensions: []AdditionalExtensionConfiguration{ + { + Name: "postgis", + }, + { + Name: "pgvector", + }, + }, + } + config := CreatePostgresqlConfiguration(info) + Expect(config.GetConfig(ExtensionControlPath)).To(BeEquivalentTo("$system:" + sharePaths)) + Expect(config.GetConfig(DynamicLibraryPath)).To(BeEquivalentTo("$libdir:" + libPaths)) + }) + + It("correctly merges the configuration with UserSettings", func() { + info := ConfigurationInfo{ + Settings: CnpgConfigurationSettings, + MajorVersion: 18, + IncludingMandatory: true, + UserSettings: map[string]string{ + ExtensionControlPath: "/my/extension/path", + DynamicLibraryPath: "/my/library/path", + }, + AdditionalExtensions: []AdditionalExtensionConfiguration{ + { + Name: "postgis", + }, + { + Name: "pgvector", + }, + }, + } + config := CreatePostgresqlConfiguration(info) + Expect(config.GetConfig(ExtensionControlPath)).To(BeEquivalentTo("$system:" + sharePaths + ":/my/extension/path")) + Expect(config.GetConfig(DynamicLibraryPath)).To(BeEquivalentTo("$libdir:" + libPaths + ":/my/library/path")) + }) + + It("when custom 
paths are provided (multi-extension)", func() { + const ( + geoShare1 = ExtensionsBaseDirectory + "/geo/postgis/share" + geoShare2 = ExtensionsBaseDirectory + "/geo/pgrouting/share" + geoLib1 = ExtensionsBaseDirectory + "/geo/postgis/lib" + geoLib2 = ExtensionsBaseDirectory + "/geo/pgrouting/lib" + utilityShare1 = ExtensionsBaseDirectory + "/utility/pgaudit/share" + utilityShare2 = ExtensionsBaseDirectory + "/utility/pg-failover-slots/share" + utilityLib1 = ExtensionsBaseDirectory + "/utility/pgaudit/lib" + utilityLib2 = ExtensionsBaseDirectory + "/utility/pg-failover-slots/lib" + ) + sharePaths = strings.Join([]string{geoShare1, geoShare2, utilityShare1, utilityShare2}, ":") + libPaths = strings.Join([]string{geoLib1, geoLib2, utilityLib1, utilityLib2}, ":") + + info := ConfigurationInfo{ + Settings: CnpgConfigurationSettings, + MajorVersion: 18, + IncludingMandatory: true, + AdditionalExtensions: []AdditionalExtensionConfiguration{ + { + Name: "geo", + ExtensionControlPath: []string{"postgis/share", "./pgrouting/share"}, + DynamicLibraryPath: []string{"postgis/lib/", "/pgrouting/lib/"}, + }, + { + Name: "utility", + ExtensionControlPath: []string{"pgaudit/share", "./pg-failover-slots/share"}, + DynamicLibraryPath: []string{"pgaudit/lib/", "/pg-failover-slots/lib/"}, + }, + }, + } + config := CreatePostgresqlConfiguration(info) + Expect(config.GetConfig(ExtensionControlPath)).To(BeEquivalentTo("$system:" + sharePaths)) + Expect(config.GetConfig(DynamicLibraryPath)).To(BeEquivalentTo("$libdir:" + libPaths)) + }) }) }) diff --git a/pkg/postgres/identifier.go b/pkg/postgres/identifier.go index 2d8f42f8b9..ca55cdc868 100644 --- a/pkg/postgres/identifier.go +++ b/pkg/postgres/identifier.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package postgres contains the function covering the PostgreSQL diff --git a/pkg/postgres/identifier_test.go b/pkg/postgres/identifier_test.go index f47c8d8d0f..1f2ecbb333 100644 --- a/pkg/postgres/identifier_test.go +++ b/pkg/postgres/identifier_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/plugin/config.go b/pkg/postgres/plugin/config.go new file mode 100644 index 0000000000..b63afdb37a --- /dev/null +++ b/pkg/postgres/plugin/config.go @@ -0,0 +1,72 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package plugin + +import ( + "context" + + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "github.com/cloudnative-pg/machinery/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/client" + + cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" +) + +// CreatePostgresqlConfigurationWithPlugins creates a new PostgreSQL configuration and enriches it by invoking +// the registered Plugins +func CreatePostgresqlConfigurationWithPlugins( + ctx context.Context, + info postgres.ConfigurationInfo, + operationType postgresClient.OperationType_Type, +) (*postgres.PgConfiguration, error) { + contextLogger := log.FromContext(ctx).WithName("enrichConfigurationWithPlugins") + + pgConf := postgres.CreatePostgresqlConfiguration(info) + + cluster, ok := ctx.Value(contextutils.ContextKeyCluster).(client.Object) + if !ok || cluster == nil { + contextLogger.Trace("skipping CreatePostgresqlConfigurationWithPlugins, cannot find the cluster inside the context") + return pgConf, nil + } + + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) + if pluginClient == nil { + contextLogger.Trace( + "skipping CreatePostgresqlConfigurationWithPlugins, cannot find the plugin client inside the context") + return pgConf, nil + } + + conf, err := pluginClient.EnrichConfiguration( + ctx, + cluster, + pgConf.GetConfigurationParameters(), + operationType, + ) + if err != nil { + contextLogger.Error(err, "failed to enrich configuration with plugins") + return nil, err + } + + pgConf.SetConfigurationParameters(conf) + + return pgConf, nil +} diff --git a/pkg/postgres/plugin/doc.go b/pkg/postgres/plugin/doc.go new file mode 100644 index 0000000000..3d9f82c9ce --- /dev/null +++ b/pkg/postgres/plugin/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package plugin contains the methods to interact with the plugins that have the Postgres capabilities +package plugin diff --git a/pkg/postgres/replication/doc.go b/pkg/postgres/replication/doc.go index cf972013c3..27a19a506c 100644 --- a/pkg/postgres/replication/doc.go +++ b/pkg/postgres/replication/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package replication contains the code relative to the diff --git a/pkg/postgres/replication/explicit.go b/pkg/postgres/replication/explicit.go index f935a16cb7..8772f880e3 100644 --- a/pkg/postgres/replication/explicit.go +++ b/pkg/postgres/replication/explicit.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,43 +13,156 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replication import ( - "fmt" - "strings" + "slices" + "sort" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) -func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) string { +// placeholderInstanceNameSuffix is the suffix to be added to the +// cluster name in order to create a fake instance name to be used in +// `synchronous_standby_names` when the replica list would be empty.
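For reference, a minimal sketch of how this placeholder is rendered once the configuration goes through the SynchronousStandbyNamesConfig.String() method introduced above; the values mirror the accompanying test for a cluster named "example":

```go
package main

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
)

func main() {
	// With no usable standby, the required-durability path injects
	// "<cluster name>-placeholder"; String() double-quotes every standby name.
	cfg := postgres.SynchronousStandbyNamesConfig{
		Method:       "FIRST",
		NumSync:      2,
		StandbyNames: []string{"example-placeholder"},
	}
	fmt.Println(cfg.String()) // FIRST 2 ("example-placeholder")
}
```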
+const placeholderInstanceNameSuffix = "-placeholder" + +func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) postgres.SynchronousStandbyNamesConfig { + switch cluster.Spec.PostgresConfiguration.Synchronous.DataDurability { + case apiv1.DataDurabilityLevelPreferred: + return explicitSynchronousStandbyNamesDataDurabilityPreferred(cluster) + + default: + return explicitSynchronousStandbyNamesDataDurabilityRequired(cluster) + } +} + +func explicitSynchronousStandbyNamesDataDurabilityRequired( + cluster *apiv1.Cluster, +) postgres.SynchronousStandbyNamesConfig { config := cluster.Spec.PostgresConfiguration.Synchronous // Create the list of pod names - clusterInstancesList := getSortedNonPrimaryInstanceNames(cluster) + clusterInstancesList := getSortedInstanceNames(cluster) + + // Cap the number of standby names using the configuration on the cluster if config.MaxStandbyNamesFromCluster != nil && len(clusterInstancesList) > *config.MaxStandbyNamesFromCluster { clusterInstancesList = clusterInstancesList[:*config.MaxStandbyNamesFromCluster] } // Add prefix and suffix - instancesList := config.StandbyNamesPre + instancesList := make([]string, 0, + len(clusterInstancesList)+len(config.StandbyNamesPre)+len(config.StandbyNamesPost)) + instancesList = append(instancesList, config.StandbyNamesPre...) instancesList = append(instancesList, clusterInstancesList...) instancesList = append(instancesList, config.StandbyNamesPost...) + + // An empty instances list would generate a PostgreSQL syntax error + // because configuring synchronous replication with an empty replica + // list is not allowed. + // This is only a safeguard: such a list should never end up in a PostgreSQL configuration. if len(instancesList) == 0 { - return "" + instancesList = []string{ + cluster.Name + placeholderInstanceNameSuffix, + } + } + + return postgres.SynchronousStandbyNamesConfig{ + Method: config.Method.ToPostgreSQLConfigurationKeyword(), + NumSync: config.Number, + StandbyNames: instancesList, + } +} + +func explicitSynchronousStandbyNamesDataDurabilityPreferred( + cluster *apiv1.Cluster, +) postgres.SynchronousStandbyNamesConfig { + config := cluster.Spec.PostgresConfiguration.Synchronous + + // Create the list of healthy replicas + instancesList := getSortedNonPrimaryHealthyInstanceNames(cluster) + + // Cap the number of standby names using the configuration on the cluster + if config.MaxStandbyNamesFromCluster != nil && len(instancesList) > *config.MaxStandbyNamesFromCluster { + instancesList = instancesList[:*config.MaxStandbyNamesFromCluster] + } + + // Since data durability is not enforced, we cap the required number of + // synchronous replicas to the number of available replicas. + syncReplicaNumber := config.Number + if syncReplicaNumber > len(instancesList) { + syncReplicaNumber = len(instancesList) + } + + // An empty instances list is not allowed in synchronous_standby_names + if len(instancesList) == 0 { + return postgres.SynchronousStandbyNamesConfig{ + Method: "", + NumSync: 0, + StandbyNames: []string{}, + } + } + + return postgres.SynchronousStandbyNamesConfig{ + Method: config.Method.ToPostgreSQLConfigurationKeyword(), + NumSync: syncReplicaNumber, + StandbyNames: instancesList, + } +} + +// getSortedInstanceNames gets a list of all the known PostgreSQL instances in an +// order suitable for use in `synchronous_standby_names`.
+// +// The result is composed of: +// +// - the list of non-primary ready instances - these are most likely the +// instances to be used as potential synchronous replicas +// - the list of non-primary non-ready instances +// - the name of the primary instance +// +// This algorithm has been designed to produce an order that is +// meaningful when used with priority-based synchronous replication (using the +// `first` method), while using the `maxStandbyNamesFromCluster` parameter. +func getSortedInstanceNames(cluster *apiv1.Cluster) []string { + nonPrimaryReadyInstances := make([]string, 0, cluster.Spec.Instances) + otherInstances := make([]string, 0, cluster.Spec.Instances) + primaryInstance := "" + + for state, instanceList := range cluster.Status.InstancesStatus { + for _, instance := range instanceList { + switch { + case cluster.Status.CurrentPrimary == instance: + primaryInstance = instance + + case state == apiv1.PodHealthy: + nonPrimaryReadyInstances = append(nonPrimaryReadyInstances, instance) + } + } + } + + for _, instance := range cluster.Status.InstanceNames { + if instance == primaryInstance { + continue + } + + if !slices.Contains(nonPrimaryReadyInstances, instance) { + otherInstances = append(otherInstances, instance) + } } - // Escape the pod list - escapedReplicas := make([]string, len(instancesList)) - for idx, name := range instancesList { - escapedReplicas[idx] = escapePostgresConfLiteral(name) + sort.Strings(nonPrimaryReadyInstances) + sort.Strings(otherInstances) + result := make([]string, 0, cluster.Spec.Instances) + result = append(result, nonPrimaryReadyInstances...) + result = append(result, otherInstances...) + if len(primaryInstance) > 0 { + result = append(result, primaryInstance) } - return fmt.Sprintf( - "%s %v (%v)", - config.Method.ToPostgreSQLConfigurationKeyword(), - config.Number, - strings.Join(escapedReplicas, ",")) + return result } diff --git a/pkg/postgres/replication/explicit_test.go b/pkg/postgres/replication/explicit_test.go index 2a3e13f9d5..8dc7d9ce45 100644 --- a/pkg/postgres/replication/explicit_test.go +++ b/pkg/postgres/replication/explicit_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replication @@ -20,99 +23,281 @@ import ( "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) var _ = Describe("synchronous replica configuration with the new API", func() { - It("creates configuration with the ANY clause", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodAny, - Number: 2, - MaxStandbyNamesFromCluster: nil, - StandbyNamesPre: []string{}, - StandbyNamesPost: []string{}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, - }, - } - - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("ANY 2 (\"three\",\"two\")")) - }) + When("data durability is required", func() { + It("creates configuration with the ANY clause", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodAny, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } - It("creates configuration with the FIRST clause", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: nil, - StandbyNamesPre: []string{}, - StandbyNamesPost: []string{}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, - }, - } - - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\")")) - }) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "ANY", + NumSync: 2, + StandbyNames: []string{"three", "two", "one"}, + })) + }) - It("consider the maximum number of standby names", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: ptr.To(1), - StandbyNamesPre: []string{}, - StandbyNamesPost: []string{}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, - }, - } - - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\")")) - }) + It("creates configuration with the FIRST clause", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three", "two", "one"}, + })) + }) + + It("considers the maximum number of 
standby names", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three"}, + })) + }) + + It("prepends the prefix and append the suffix", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + StandbyNamesPre: []string{"prefix", "here"}, + StandbyNamesPost: []string{"suffix", "there"}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"prefix", "here", "three", "suffix", "there"}, + })) + }) + + It("enforce synchronous replication even if there are no healthy replicas", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + } + cluster.Status = apiv1.ClusterStatus{} + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"example-placeholder"}, + })) + }) - It("prepend the prefix and append the suffix", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: ptr.To(1), - StandbyNamesPre: []string{"prefix", "here"}, - StandbyNamesPost: []string{"suffix", "there"}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, - }, - } - - Expect(explicitSynchronousStandbyNames(cluster)).To( - Equal("FIRST 2 (\"prefix\",\"here\",\"three\",\"suffix\",\"there\")")) + It("includes pods that do not report the status", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "three"}, + }, + InstanceNames: []string{"one", "two", "three"}, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three", "two", "one"}, + })) + }) }) - It("returns 
an empty value when no instance is available", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: ptr.To(1), - } - cluster.Status = apiv1.ClusterStatus{} + When("Data durability is preferred", func() { + It("creates configuration with the ANY clause", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodAny, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + // Important: the name of the primary is not included in the list + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "ANY", + NumSync: 2, + StandbyNames: []string{"three", "two"}, + })) + }) + + It("creates configuration with the FIRST clause", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + // Important: the name of the primary is not included in the list + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three", "two"}, + })) + }) + + It("considers the maximum number of standby names", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "a-primary", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"a-primary", "two", "three"}, + }, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 1, + StandbyNames: []string{"three"}, + })) + }) + + It("ignores the prefix and the suffix", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + StandbyNamesPre: []string{"prefix", "here"}, + StandbyNamesPost: []string{"suffix", "there"}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + 
Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three", "two"}, + })) + }) + + It("disables synchronous replication when no instance is available", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + } + cluster.Status = apiv1.ClusterStatus{} + + Expect(explicitSynchronousStandbyNames(cluster).IsZero()).To(BeTrue()) + }) + + It("does not include pods that do not report the status", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "three"}, + }, + InstanceNames: []string{"one", "two", "three"}, + } - Expect(explicitSynchronousStandbyNames(cluster)).To(BeEmpty()) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 1, + StandbyNames: []string{"three"}, + })) + }) }) }) diff --git a/pkg/postgres/replication/legacy.go b/pkg/postgres/replication/legacy.go index e422fa96cc..b3e552193c 100644 --- a/pkg/postgres/replication/legacy.go +++ b/pkg/postgres/replication/legacy.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,48 +13,55 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package replication import ( - "fmt" + "context" "sort" - "strings" "github.com/cloudnative-pg/machinery/pkg/log" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) // legacySynchronousStandbyNames sets the standby node list with the // legacy API -func legacySynchronousStandbyNames(cluster *apiv1.Cluster) string { - syncReplicas, syncReplicasElectable := getSyncReplicasData(cluster) +func legacySynchronousStandbyNames(ctx context.Context, cluster *apiv1.Cluster) postgres.SynchronousStandbyNamesConfig { + syncReplicas, syncReplicasElectable := getSyncReplicasData(ctx, cluster) if syncReplicasElectable != nil && syncReplicas > 0 { - escapedReplicas := make([]string, len(syncReplicasElectable)) - for idx, name := range syncReplicasElectable { - escapedReplicas[idx] = escapePostgresConfLiteral(name) - } - return fmt.Sprintf( - "ANY %v (%v)", - syncReplicas, - strings.Join(escapedReplicas, ",")) + return postgres.SynchronousStandbyNamesConfig{ + Method: "ANY", + NumSync: syncReplicas, + StandbyNames: syncReplicasElectable, + } } - return "" + return postgres.SynchronousStandbyNamesConfig{} } // getSyncReplicasData computes the actual number of required synchronous replicas and the names of // the electable sync replicas given the requested min, max, the number of ready replicas in the cluster and the sync // replicas constraints (if any) -func getSyncReplicasData(cluster *apiv1.Cluster) (syncReplicas int, electableSyncReplicas []string) { +func getSyncReplicasData( + ctx context.Context, + cluster *apiv1.Cluster, +) (syncReplicas int, electableSyncReplicas []string) { + contextLogger := log.FromContext(ctx) + + // We start with the number of healthy replicas (healthy pods minus one) // and verify it is greater than 0 and between minSyncReplicas and maxSyncReplicas.
// Formula: 1 <= minSyncReplicas <= SyncReplicas <= maxSyncReplicas < readyReplicas - readyReplicas := len(cluster.Status.InstancesStatus[utils.PodHealthy]) - 1 + readyReplicas := len(cluster.Status.InstancesStatus[apiv1.PodHealthy]) - 1 // If the number of ready replicas is negative, // there are no healthy Pods so no sync replica can be configured @@ -74,16 +82,16 @@ func getSyncReplicasData(cluster *apiv1.Cluster) (syncReplicas int, electableSyn // temporarily unresponsive system) if readyReplicas < cluster.Spec.MinSyncReplicas { syncReplicas = readyReplicas - log.Warning("Ignore minSyncReplicas to enforce self-healing", + contextLogger.Warning("Ignore minSyncReplicas to enforce self-healing", "syncReplicas", readyReplicas, "minSyncReplicas", cluster.Spec.MinSyncReplicas, "maxSyncReplicas", cluster.Spec.MaxSyncReplicas) } - electableSyncReplicas = getElectableSyncReplicas(cluster) + electableSyncReplicas = getElectableSyncReplicas(ctx, cluster) numberOfElectableSyncReplicas := len(electableSyncReplicas) if numberOfElectableSyncReplicas < syncReplicas { - log.Warning("lowering sync replicas due to not enough electable instances for sync replication "+ + contextLogger.Warning("lowering sync replicas due to not enough electable instances for sync replication "+ "given the constraints", "electableSyncReplicasWithoutConstraints", syncReplicas, "electableSyncReplicasWithConstraints", numberOfElectableSyncReplicas, @@ -95,8 +103,10 @@ func getSyncReplicasData(cluster *apiv1.Cluster) (syncReplicas int, electableSyn } // getElectableSyncReplicas computes the names of the instances that can be elected to sync replicas -func getElectableSyncReplicas(cluster *apiv1.Cluster) []string { - nonPrimaryInstances := getSortedNonPrimaryInstanceNames(cluster) +func getElectableSyncReplicas(ctx context.Context, cluster *apiv1.Cluster) []string { + contextLogger := log.FromContext(ctx) + + nonPrimaryInstances := getSortedNonPrimaryHealthyInstanceNames(cluster) topology := cluster.Status.Topology // We need to include every replica inside the list of possible synchronous standbys if we have no constraints @@ -109,20 +119,20 @@ func getElectableSyncReplicas(cluster *apiv1.Cluster) []string { // The same happens if we have failed to extract topology, we want to preserve the current status by adding all the // electable instances. 
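As a worked example of the bounds that getSyncReplicasData applies above, here is a simplified sketch (not the actual implementation) using the fake cluster from the test suite (three pods, minSyncReplicas=1, maxSyncReplicas=2):

```go
package main

import "fmt"

// clampSyncReplicas is a simplified model of the bounds documented above:
// 1 <= minSyncReplicas <= syncReplicas <= maxSyncReplicas < readyReplicas + 1
func clampSyncReplicas(healthyPods, minSync, maxSync int) int {
	readyReplicas := healthyPods - 1 // the primary is not a synchronous standby candidate
	if readyReplicas <= 0 {
		return 0 // no healthy replica: synchronous replication cannot be configured
	}
	syncReplicas := maxSync
	if syncReplicas > readyReplicas {
		syncReplicas = readyReplicas
	}
	if readyReplicas < minSync {
		// self-healing: ignore minSyncReplicas instead of blocking writes
		syncReplicas = readyReplicas
	}
	return syncReplicas
}

func main() {
	fmt.Println(clampSyncReplicas(3, 1, 2)) // 2: both replicas can be synchronous
	fmt.Println(clampSyncReplicas(2, 2, 2)) // 1: minSyncReplicas is ignored for self-healing
	fmt.Println(clampSyncReplicas(1, 1, 2)) // 0: only the primary is healthy
}
```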
if !topology.SuccessfullyExtracted { - log.Warning("topology data not extracted, falling back to all electable sync replicas") + contextLogger.Warning("topology data not extracted, falling back to all electable sync replicas") return nonPrimaryInstances } currentPrimary := apiv1.PodName(cluster.Status.CurrentPrimary) // given that the constraints are based off the primary instance if we still don't have one we cannot continue if currentPrimary == "" { - log.Warning("no primary elected, cannot compute electable sync replicas") + contextLogger.Warning("no primary elected, cannot compute electable sync replicas") return nil } currentPrimaryTopology, ok := topology.Instances[currentPrimary] if !ok { - log.Warning("current primary topology not yet extracted, cannot computed electable sync replicas", + contextLogger.Warning("current primary topology not yet extracted, cannot compute electable sync replicas", "instanceName", currentPrimary) return nil } @@ -134,7 +144,7 @@ func getElectableSyncReplicas(cluster *apiv1.Cluster) []string { instanceTopology, ok := topology.Instances[name] // if we still don't have the topology data for the node we skip it from inserting it in the electable pool if !ok { - log.Warning("current instance topology not found", "instanceName", name) + contextLogger.Warning("current instance topology not found", "instanceName", name) continue } @@ -146,9 +156,9 @@ return electableReplicas } -func getSortedNonPrimaryInstanceNames(cluster *apiv1.Cluster) []string { +func getSortedNonPrimaryHealthyInstanceNames(cluster *apiv1.Cluster) []string { var nonPrimaryInstances []string - for _, instance := range cluster.Status.InstancesStatus[utils.PodHealthy] { + for _, instance := range cluster.Status.InstancesStatus[apiv1.PodHealthy] { if cluster.Status.CurrentPrimary != instance { nonPrimaryInstances = append(nonPrimaryInstances, instance) } diff --git a/pkg/postgres/replication/legacy_test.go b/pkg/postgres/replication/legacy_test.go index d83e280680..c29a099ac0 100644 --- a/pkg/postgres/replication/legacy_test.go +++ b/pkg/postgres/replication/legacy_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,27 +13,29 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replication import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) var _ = Describe("ensuring the correctness of synchronous replica data calculation", func() { - It("should return all the non primary pods as electable", func() { + It("should return all the non primary pods as electable", func(ctx SpecContext) { cluster := createFakeCluster("example") - number, names := getSyncReplicasData(cluster) + number, names := getSyncReplicasData(ctx, cluster) Expect(number).To(Equal(2)) Expect(names).To(Equal([]string{"example-2", "example-3"})) }) - It("should return only the pod in the different AZ", func() { + It("should return only the pod in the different AZ", func(ctx SpecContext) { const ( primaryPod = "exampleAntiAffinity-1" sameZonePod = "exampleAntiAffinity-2" @@ -59,37 +62,37 @@ var _ = Describe("ensuring the correctness of synchronous replica data calculati }, } - number, names := getSyncReplicasData(cluster) + number, names := getSyncReplicasData(ctx, cluster) Expect(number).To(Equal(1)) Expect(names).To(Equal([]string{differentAZPod})) }) - It("should lower the synchronous replica number to enforce self-healing", func() { + It("should lower the synchronous replica number to enforce self-healing", func(ctx SpecContext) { cluster := createFakeCluster("exampleOnePod") cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "exampleOnePod-1", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"exampleOnePod-1"}, - utils.PodFailed: {"exampleOnePod-2", "exampleOnePod-3"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"exampleOnePod-1"}, + apiv1.PodFailed: {"exampleOnePod-2", "exampleOnePod-3"}, }, } - number, names := getSyncReplicasData(cluster) + number, names := getSyncReplicasData(ctx, cluster) Expect(number).To(BeZero()) Expect(names).To(BeEmpty()) Expect(cluster.Spec.MinSyncReplicas).To(Equal(1)) }) - It("should behave correctly if there is no ready host", func() { + It("should behave correctly if there is no ready host", func(ctx SpecContext) { cluster := createFakeCluster("exampleNoPods") cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "example-1", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodFailed: {"exampleNoPods-1", "exampleNoPods-2", "exampleNoPods-3"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodFailed: {"exampleNoPods-1", "exampleNoPods-2", "exampleNoPods-3"}, }, } - number, names := getSyncReplicasData(cluster) + number, names := getSyncReplicasData(ctx, cluster) Expect(number).To(BeZero()) Expect(names).To(BeEmpty()) @@ -97,18 +100,24 @@ var _ = Describe("ensuring the correctness of synchronous replica data calculati }) var _ = Describe("legacy synchronous_standby_names configuration", func() { - It("generate the correct value for the synchronous_standby_names parameter", func() { + It("generate the correct value for the synchronous_standby_names parameter", func(ctx SpecContext) { cluster := createFakeCluster("exampleNoPods") cluster.Spec.MinSyncReplicas = 2 cluster.Spec.MaxSyncReplicas = 2 cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "example-1", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, }, } - synchronousStandbyNames := legacySynchronousStandbyNames(cluster) - Expect(synchronousStandbyNames). 
- To(Equal("ANY 2 (\"one\",\"three\",\"two\")")) + synchronousStandbyNames := legacySynchronousStandbyNames(ctx, cluster) + + Expect(synchronousStandbyNames).To(Equal( + postgres.SynchronousStandbyNamesConfig{ + Method: "ANY", + NumSync: 2, + StandbyNames: []string{"one", "three", "two"}, + }, + )) }) }) diff --git a/pkg/postgres/replication/replication.go b/pkg/postgres/replication/replication.go index c3746dd091..e791731dc4 100644 --- a/pkg/postgres/replication/replication.go +++ b/pkg/postgres/replication/replication.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,30 +13,35 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replication import ( + "context" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) // GetExpectedSyncReplicasNumber computes the actual number of required synchronous replicas -func GetExpectedSyncReplicasNumber(cluster *apiv1.Cluster) int { +func GetExpectedSyncReplicasNumber(ctx context.Context, cluster *apiv1.Cluster) int { if cluster.Spec.PostgresConfiguration.Synchronous != nil { return cluster.Spec.PostgresConfiguration.Synchronous.Number } - syncReplicas, _ := getSyncReplicasData(cluster) + syncReplicas, _ := getSyncReplicasData(ctx, cluster) return syncReplicas } // GetSynchronousStandbyNames gets the value to be applied // to synchronous_standby_names -func GetSynchronousStandbyNames(cluster *apiv1.Cluster) string { +func GetSynchronousStandbyNames(ctx context.Context, cluster *apiv1.Cluster) postgres.SynchronousStandbyNamesConfig { if cluster.Spec.PostgresConfiguration.Synchronous != nil { return explicitSynchronousStandbyNames(cluster) } - return legacySynchronousStandbyNames(cluster) + return legacySynchronousStandbyNames(ctx, cluster) } diff --git a/pkg/postgres/replication/suite_test.go b/pkg/postgres/replication/suite_test.go index d4750906f2..e693b884a2 100644 --- a/pkg/postgres/replication/suite_test.go +++ b/pkg/postgres/replication/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replication @@ -21,7 +24,6 @@ import ( "testing" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -35,15 +37,16 @@ func TestReplication(t *testing.T) { func createFakeCluster(name string) *apiv1.Cluster { primaryPod := fmt.Sprintf("%s-1", name) cluster := &apiv1.Cluster{} + cluster.Name = name cluster.Default() cluster.Spec.Instances = 3 cluster.Spec.MaxSyncReplicas = 2 cluster.Spec.MinSyncReplicas = 1 cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: primaryPod, - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {primaryPod, fmt.Sprintf("%s-2", name), fmt.Sprintf("%s-3", name)}, - utils.PodFailed: {}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {primaryPod, fmt.Sprintf("%s-2", name), fmt.Sprintf("%s-3", name)}, + apiv1.PodFailed: {}, }, } return cluster diff --git a/pkg/postgres/replication/utils.go b/pkg/postgres/replication/utils.go index efc4f42b20..f39c28d576 100644 --- a/pkg/postgres/replication/utils.go +++ b/pkg/postgres/replication/utils.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replication diff --git a/pkg/postgres/roles.go b/pkg/postgres/roles.go index d956e862b4..b2565cf987 100644 --- a/pkg/postgres/roles.go +++ b/pkg/postgres/roles.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/roles_test.go b/pkg/postgres/roles_test.go index bf4858242f..722ebd23c3 100644 --- a/pkg/postgres/roles_test.go +++ b/pkg/postgres/roles_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/status.go b/pkg/postgres/status.go index 14d2601a3a..d1b63ab6dd 100644 --- a/pkg/postgres/status.go +++ b/pkg/postgres/status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres
@@ -21,8 +24,10 @@ import ( "fmt" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/cloudnative-pg/machinery/pkg/types" corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" )
@@ -43,12 +48,13 @@ type PostgresqlStatus struct { IsArchivingWAL bool `json:"isArchivingWAL,omitempty"` Node string `json:"node"` Pod *corev1.Pod `json:"pod"` - TotalInstanceSize string `json:"totalInstanceSize"` // populated when MightBeUnavailable reported a healthy status even if it found an error MightBeUnavailableMaskedError string `json:"mightBeUnavailableMaskedError,omitempty"` - // Archiver status + // Hash of the current PostgreSQL configuration + LoadedConfigurationHash string `json:"loadedConfigurationHash,omitempty"` + // Archiver status LastArchivedWAL string `json:"lastArchivedWAL,omitempty"` LastArchivedWALTime string `json:"lastArchivedWALTime,omitempty"` LastFailedWAL string `json:"lastFailedWAL,omitempty"`
@@ -214,7 +220,9 @@ func (list PgStatReplicationList) Less(i, j int) bool { // PostgresqlStatusList is a list of PostgreSQL status received from the Pods // that can be sorted considering the replication status type PostgresqlStatusList struct { - Items []PostgresqlStatus `json:"items"` + Items []PostgresqlStatus `json:"items"` + IsReplicaCluster bool `json:"-"` + CurrentPrimary string `json:"-"` } // GetNames returns a list of names of Pods
@@ -298,6 +306,16 @@ func (list *PostgresqlStatusList) Less(i, j int) bool { return !list.Items[i].ReplayLsn.Less(list.Items[j].ReplayLsn) } + // In a replica cluster, all instances are standbys of an external primary. + // Therefore, `IsPrimary` is always false for every item in the list. + // We rely on the `CurrentPrimary` field to identify the designated primary + // instance that is replicating from the external cluster, ensuring it is + // sorted first among the standbys. + if list.IsReplicaCluster && + (list.Items[i].Pod.Name == list.CurrentPrimary && list.Items[j].Pod.Name != list.CurrentPrimary) { + return true + } + return list.Items[i].Pod.Name < list.Items[j].Pod.Name }
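To make the new ordering rule concrete, here is a small self-contained sketch of the comparator applied to plain pod names. One deliberate deviation, for illustration only: the sketch also special-cases the right-hand item being the designated primary, so the ordering is consistent regardless of the direction in which sort.Slice happens to compare two items; everything else falls back to name order, as in the hunk above.

package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"cluster-example-2", "cluster-example-1", "cluster-example-3"}
	currentPrimary := "cluster-example-2" // designated primary of the replica cluster

	sort.Slice(names, func(i, j int) bool {
		// The designated primary always sorts first among the standbys.
		if names[i] == currentPrimary && names[j] != currentPrimary {
			return true
		}
		if names[j] == currentPrimary && names[i] != currentPrimary {
			return false
		}
		// Fallback: lexicographic order on the pod name.
		return names[i] < names[j]
	})

	fmt.Println(names) // [cluster-example-2 cluster-example-1 cluster-example-3]
}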
@@ -406,3 +424,66 @@ func (list PostgresqlStatusList) InstancesReportingStatus() int { return n } + +// PrimaryNames gets the names of each primary instance of this Cluster. Under +// normal conditions, this list is composed of one and only one name. +func (list PostgresqlStatusList) PrimaryNames() []string { + result := make([]string, 0, 1) + + for _, item := range list.Items { + if item.IsPrimary { + result = append(result, item.Pod.Name) + } + } + + return result +} + +// GetConfigurationReport generates a report on the PostgreSQL configuration +// status of each Pod in the list. +func (list PostgresqlStatusList) GetConfigurationReport() ConfigurationReport { + result := make([]ConfigurationReportEntry, len(list.Items)) + for i := range list.Items { + result[i].PodName = list.Items[i].Pod.Name + result[i].ConfigHash = list.Items[i].LoadedConfigurationHash + } + + return result +} + +// ConfigurationReportEntry contains information about the current +// PostgreSQL configuration of a Pod. +type ConfigurationReportEntry struct { + // PodName is the name of the Pod. + PodName string `json:"podName"` + + // ConfigHash is the hash of the currently loaded configuration or empty + // if the instance manager didn't report it. + ConfigHash string `json:"configHash"` +} + +// ConfigurationReport contains information about the current +// PostgreSQL configuration of each Pod. +type ConfigurationReport []ConfigurationReportEntry + +// IsUniform checks if every Pod has loaded the same PostgreSQL +// configuration. Returns: +// +// - true if every Pod reports the configuration, and the same +// configuration is used across all Pods. +// - false if every Pod reports the configuration and at least +// two Pods are using different configurations. +// - nil if any Pod doesn't report the configuration. +func (report ConfigurationReport) IsUniform() *bool { + detectedConfigurationHash := stringset.New() + for _, item := range report { + if item.ConfigHash == "" { + // this Pod isn't reporting its configuration, so we can't + // tell whether the configurations are uniform or not. + return nil + } + detectedConfigurationHash.Put(item.ConfigHash) + } + + return ptr.To(detectedConfigurationHash.Len() == 1) +}
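Because IsUniform returns a *bool rather than a bool, callers have three outcomes to handle. The minimal sketch below shows the intended consumption pattern; it reimplements the hash check with a plain map instead of the machinery stringset, and the entry type and main wrapper are illustrative only.

package main

import "fmt"

type entry struct{ PodName, ConfigHash string }

// isUniform mirrors ConfigurationReport.IsUniform: nil means "unknown".
func isUniform(report []entry) *bool {
	seen := map[string]struct{}{}
	for _, item := range report {
		if item.ConfigHash == "" {
			return nil // at least one Pod did not report its configuration
		}
		seen[item.ConfigHash] = struct{}{}
	}
	uniform := len(seen) == 1
	return &uniform
}

func main() {
	report := []entry{{"cluster-example-1", "abc"}, {"cluster-example-2", "abc"}}
	switch u := isUniform(report); {
	case u == nil:
		fmt.Println("unknown: an instance manager did not report a hash")
	case *u:
		fmt.Println("uniform: every instance loaded the same configuration")
	default:
		fmt.Println("not uniform: a configuration change is still propagating")
	}
}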
"github.com/onsi/gomega" @@ -234,3 +238,71 @@ var _ = Describe("PostgreSQL status real", func() { }) }) }) + +var _ = Describe("Configuration report", func() { + DescribeTable( + "Configuration report", + func(report ConfigurationReport, result *bool) { + if result == nil { + Expect(report.IsUniform()).To(BeNil()) + } + Expect(report.IsUniform()).To(Equal(result)) + }, + Entry( + "with older and newer instance managers at the same time", + ConfigurationReport{ + { + PodName: "cluster-example-1", + ConfigHash: "", + }, + { + PodName: "cluster-example-2", + ConfigHash: "abc", + }, + }, + nil, + ), + Entry( + "with old instance managers", + ConfigurationReport{ + { + PodName: "cluster-example-1", + ConfigHash: "", + }, + { + PodName: "cluster-example-2", + ConfigHash: "", + }, + }, + nil, + ), + Entry( + "with instance managers that are reporting different configurations", + ConfigurationReport{ + { + PodName: "cluster-example-1", + ConfigHash: "abc", + }, + { + PodName: "cluster-example-2", + ConfigHash: "def", + }, + }, + ptr.To(false), + ), + Entry( + "with instance manager that are reporting the same configuration", + ConfigurationReport{ + { + PodName: "cluster-example-1", + ConfigHash: "abc", + }, + { + PodName: "cluster-example-2", + ConfigHash: "abc", + }, + }, + ptr.To(true), + ), + ) +}) diff --git a/pkg/postgres/suite_test.go b/pkg/postgres/suite_test.go index eb00961f69..9fe32b8ed4 100644 --- a/pkg/postgres/suite_test.go +++ b/pkg/postgres/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/version.go b/pkg/postgres/version.go deleted file mode 100644 index 2384b1003a..0000000000 --- a/pkg/postgres/version.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package postgres - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -const firstMajorWithoutMinor = 10 - -var semanticVersionRegex = regexp.MustCompile(`^(\d\.?)+`) - -// GetPostgresVersionFromTag parse a PostgreSQL version string returning -// a major version ID. 
Example: -// -// GetPostgresVersionFromTag("9.5.3") == 90503 -// GetPostgresVersionFromTag("10.2") == 100002 -// GetPostgresVersionFromTag("15beta1") == 150000 -func GetPostgresVersionFromTag(version string) (int, error) { - if !semanticVersionRegex.MatchString(version) { - return 0, - fmt.Errorf("version not starting with a semantic version regex (%v): %s", semanticVersionRegex, version) - } - - if versionOnly := semanticVersionRegex.FindString(version); versionOnly != "" { - version = versionOnly - } - - splitVersion := strings.Split(version, ".") - - idx := 0 - majorVersion, err := strconv.Atoi(splitVersion[idx]) - if err != nil { - return 0, fmt.Errorf("wrong PostgreSQL major in version %v", version) - } - parsedVersion := majorVersion * 10000 - idx++ - - if majorVersion < firstMajorWithoutMinor { - if len(splitVersion) <= idx { - return 0, fmt.Errorf("missing PostgreSQL minor in version %v", version) - } - minorVersion, err := strconv.Atoi(splitVersion[idx]) - if err != nil || minorVersion >= 100 { - return 0, fmt.Errorf("wrong PostgreSQL minor in version %v", version) - } - parsedVersion += minorVersion * 100 - idx++ - } - - if len(splitVersion) > idx { - patchLevel, err := strconv.Atoi(splitVersion[idx]) - if err != nil || patchLevel >= 100 { - return 0, fmt.Errorf("wrong PostgreSQL patch level in version %v", version) - } - parsedVersion += patchLevel - } - - return parsedVersion, nil -} - -// GetPostgresMajorVersion gets only the Major version from a PostgreSQL version string. -// Example: -// -// GetPostgresMajorVersion("90503") == 90500 -// GetPostgresMajorVersion("100002") == 100000 -func GetPostgresMajorVersion(parsedVersion int) int { - return parsedVersion - parsedVersion%100 -} - -// IsUpgradePossible detect if it's possible to upgrade from fromVersion to -// toVersion -func IsUpgradePossible(fromVersion, toVersion int) bool { - return GetPostgresMajorVersion(fromVersion) == GetPostgresMajorVersion(toVersion) -} diff --git a/pkg/postgres/version_test.go b/pkg/postgres/version_test.go deleted file mode 100644 index 3ef75e84db..0000000000 --- a/pkg/postgres/version_test.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package postgres - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("PostgreSQL version handling", func() { - Describe("parsing", func() { - It("should parse versions < 10", func() { - Expect(GetPostgresVersionFromTag("9.5.3")).To(Equal(90503)) - Expect(GetPostgresVersionFromTag("9.4")).To(Equal(90400)) - }) - - It("should parse versions >= 10", func() { - Expect(GetPostgresVersionFromTag("10.3")).To(Equal(100003)) - Expect(GetPostgresVersionFromTag("12.3")).To(Equal(120003)) - }) - - It("should ignore extra components", func() { - Expect(GetPostgresVersionFromTag("3.4.3.2.5")).To(Equal(30403)) - Expect(GetPostgresVersionFromTag("10.11.12")).To(Equal(100011)) - Expect(GetPostgresVersionFromTag("9.4_beautiful")).To(Equal(90400)) - Expect(GetPostgresVersionFromTag("11-1")).To(Equal(110000)) - Expect(GetPostgresVersionFromTag("15beta1")).To(Equal(150000)) - }) - - It("should gracefully handle errors", func() { - _, err := GetPostgresVersionFromTag("") - Expect(err).To(HaveOccurred()) - - _, err = GetPostgresVersionFromTag("8") - Expect(err).To(HaveOccurred()) - - _, err = GetPostgresVersionFromTag("9.five") - Expect(err).To(HaveOccurred()) - - _, err = GetPostgresVersionFromTag("10.old") - Expect(err).To(HaveOccurred()) - }) - }) - - Describe("major version extraction", func() { - It("should extract the major version for PostgreSQL >= 10", func() { - Expect(GetPostgresMajorVersion(100003)).To(Equal(100000)) - }) - - It("should extract the major version for PostgreSQL < 10", func() { - Expect(GetPostgresMajorVersion(90504)).To(Equal(90500)) - Expect(GetPostgresMajorVersion(90400)).To(Equal(90400)) - }) - }) - - Describe("detect whenever a version upgrade is possible using the numeric version", func() { - It("succeed when the major version is the same", func() { - Expect(IsUpgradePossible(100000, 100003)).To(BeTrue()) - Expect(IsUpgradePossible(90302, 90303)).To(BeTrue()) - }) - - It("prevent upgrading to a different major version", func() { - Expect(IsUpgradePossible(100003, 110003)).To(BeFalse()) - Expect(IsUpgradePossible(90604, 100000)).To(BeFalse()) - Expect(IsUpgradePossible(90503, 900604)).To(BeFalse()) - }) - }) -}) diff --git a/pkg/postgres/wal.go b/pkg/postgres/wal.go index 1ca31f0559..bf9de3103c 100644 --- a/pkg/postgres/wal.go +++ b/pkg/postgres/wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/wal_test.go b/pkg/postgres/wal_test.go index f2631b7cbe..92a7d17cb5 100644 --- a/pkg/postgres/wal_test.go +++ b/pkg/postgres/wal_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/promotiontoken/doc.go b/pkg/promotiontoken/doc.go index d79f0e7a8e..8728b341d4 100644 --- a/pkg/promotiontoken/doc.go +++ b/pkg/promotiontoken/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package promotiontoken : This package contains the promotion token related operations diff --git a/pkg/promotiontoken/promotion_token.go b/pkg/promotiontoken/promotion_token.go index d3a0051769..edd2f3fcb1 100644 --- a/pkg/promotiontoken/promotion_token.go +++ b/pkg/promotiontoken/promotion_token.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package promotiontoken diff --git a/pkg/promotiontoken/promotion_token_test.go b/pkg/promotiontoken/promotion_token_test.go index cce2e91479..0291293339 100644 --- a/pkg/promotiontoken/promotion_token_test.go +++ b/pkg/promotiontoken/promotion_token_test.go @@ -1,17 +1,20 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package promotiontoken diff --git a/pkg/promotiontoken/suite_test.go b/pkg/promotiontoken/suite_test.go index a2af066a89..34e4953ba0 100644 --- a/pkg/promotiontoken/suite_test.go +++ b/pkg/promotiontoken/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package promotiontoken
diff --git a/pkg/reconciler/backup/volumesnapshot/catalog.go b/pkg/reconciler/backup/volumesnapshot/catalog.go index 38048b4803..6b75ac48c0 100644 --- a/pkg/reconciler/backup/volumesnapshot/catalog.go +++ b/pkg/reconciler/backup/volumesnapshot/catalog.go
@@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot
diff --git a/pkg/reconciler/backup/volumesnapshot/doc.go b/pkg/reconciler/backup/volumesnapshot/doc.go index 1a02f8dbca..c70a9fbdca 100644 --- a/pkg/reconciler/backup/volumesnapshot/doc.go +++ b/pkg/reconciler/backup/volumesnapshot/doc.go
@@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package volumesnapshot contains the VolumeSnapshot reconciler
diff --git a/pkg/reconciler/backup/volumesnapshot/errors.go b/pkg/reconciler/backup/volumesnapshot/errors.go new file mode 100644 index 0000000000..c11421a4f0 --- /dev/null +++ b/pkg/reconciler/backup/volumesnapshot/errors.go
@@ -0,0 +1,119 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package volumesnapshot + +import ( + "context" + "errors" + "regexp" + "strconv" + "strings" + + apierrs "k8s.io/apimachinery/pkg/api/errors" +) + +var ( + retryableStatusCodes = []int{408, 429, 500, 502, 503, 504} + httpStatusCodeRegex = regexp.MustCompile(`HTTPStatusCode:\s(\d{3})`) +) + +// isNetworkErrorRetryable detects whether an error is retryable or not. +// +// Important: this function is intended for detecting errors that +// occur during communication between the operator and the Kubernetes +// API server, as well as between the operator and the instance +// manager. +// It is not designed to check errors raised by the CSI driver and +// exposed by the CSI snapshotter sidecar. +func isNetworkErrorRetryable(err error) bool { + return apierrs.IsServerTimeout(err) || apierrs.IsConflict(err) || apierrs.IsInternalError(err) || + errors.Is(err, context.DeadlineExceeded) +}
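To show what this first classifier treats as transient, here is a self-contained sketch that feeds it a few representative errors. The apierrs constructors come from k8s.io/apimachinery and are used with their real signatures; the group/resource values and the main wrapper are invented for illustration.

package main

import (
	"context"
	"errors"
	"fmt"

	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// isNetworkErrorRetryable copies the classification from the hunk above.
func isNetworkErrorRetryable(err error) bool {
	return apierrs.IsServerTimeout(err) || apierrs.IsConflict(err) || apierrs.IsInternalError(err) ||
		errors.Is(err, context.DeadlineExceeded)
}

func main() {
	gr := schema.GroupResource{Group: "postgresql.cnpg.io", Resource: "backups"}
	fmt.Println(isNetworkErrorRetryable(apierrs.NewConflict(gr, "b", nil))) // true: optimistic locking conflict
	fmt.Println(isNetworkErrorRetryable(context.DeadlineExceeded))         // true: timeout talking to the API server
	fmt.Println(isNetworkErrorRetryable(apierrs.NewNotFound(gr, "b")))     // false: not transient, surface it
}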
+ +// isCSIErrorMessageRetriable detects if a certain error message +// raised by the CSI driver corresponds to a retriable error or +// not. +// +// It relies on heuristics, as this information is not available in +// the Kubernetes VolumeSnapshot API, and the CSI driver does not +// expose it either. +func isCSIErrorMessageRetriable(msg string) bool { + isRetryableFuncs := []func(string) bool{ + isExplicitlyRetriableError, + isRetryableHTTPError, + isConflictError, + isContextDeadlineExceededError, + } + + for _, isRetryableFunc := range isRetryableFuncs { + if isRetryableFunc(msg) { + return true + } + } + + return false +} + +// isContextDeadlineExceededError detects context deadline exceeded errors. +// These are timeouts that may be retried by the Kubernetes CSI controller. +func isContextDeadlineExceededError(msg string) bool { + return strings.Contains(msg, "deadline exceeded") || strings.Contains(msg, "timed out") +} + +// isConflictError detects optimistic locking errors +func isConflictError(msg string) bool { + // Obviously this is a heuristic, but unfortunately we don't have + // the information we need. + // We're trying to handle the cases where the external-snapshotter + // controller failed on a conflict with the following error: + // + // > the object has been modified; please apply your changes to the + // > latest version and try again + + return strings.Contains(msg, "the object has been modified") +} + +// isExplicitlyRetriableError detects explicitly retriable errors as raised +// by the Azure CSI driver. These errors contain the "Retriable: true" +// string. +func isExplicitlyRetriableError(msg string) bool { + return strings.Contains(msg, "Retriable: true") +} + +// isRetryableHTTPError returns true for the following HTTP status codes: +// - 408: Request Timeout +// - 429: Too Many Requests +// - 500: Internal Server Error +// - 502: Bad Gateway +// - 503: Service Unavailable +// - 504: Gateway Timeout +func isRetryableHTTPError(msg string) bool { + if matches := httpStatusCodeRegex.FindStringSubmatch(msg); len(matches) == 2 { + if code, err := strconv.Atoi(matches[1]); err == nil { + for _, retryableCode := range retryableStatusCodes { + if code == retryableCode { + return true + } + } + } + } + + return false +}
diff --git a/pkg/reconciler/backup/volumesnapshot/errors_test.go b/pkg/reconciler/backup/volumesnapshot/errors_test.go new file mode 100644 index 0000000000..4105512fe3 --- /dev/null +++ b/pkg/reconciler/backup/volumesnapshot/errors_test.go
@@ -0,0 +1,102 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package volumesnapshot + +import ( + "context" + "errors" + "fmt" + + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Retriable error messages", func() { + DescribeTable( + "Retriable error messages", + func(msg string, isRetriable bool) { + Expect(isCSIErrorMessageRetriable(msg)).To(Equal(isRetriable)) + }, + Entry("conflict", "Hey, the object has been modified!", true), + Entry("non-retriable error", "VolumeSnapshotClass not found", false), + Entry("explicitly retriable error", "Retriable: true, the storage is gone away forever", true), + Entry("explicitly non-retriable error", "Retriable: false because my pod is working", false), + Entry("error code 502 - retriable", "RetryAfter: 0s, HTTPStatusCode: 502, RawError: Internal Server Error", true), + Entry("error code 404 - non retriable", "RetryAfter: 0s, HTTPStatusCode: 404, RawError: Not found", false), + Entry("context deadline exceeded - retriable", "context deadline exceeded waiting for snapshot creation", true), + Entry("deadline exceeded - retriable", "deadline exceeded during Azure snapshot creation", true), + Entry("timed out - retriable", "operation timed out for csi-disk-handler", true), + ) + + Describe("isContextDeadlineExceededError", func() { + It("detects 'context deadline exceeded' error messages", func() { + Expect(isContextDeadlineExceededError("context deadline exceeded")).To(BeTrue()) + }) + + It("detects 'deadline exceeded' error messages", func() { + Expect(isContextDeadlineExceededError("deadline exceeded")).To(BeTrue()) + }) + + It("detects 'timed out' error messages", func() { + Expect(isContextDeadlineExceededError("operation timed out")).To(BeTrue()) + }) + + It("rejects non-timeout error messages", func() { + Expect(isContextDeadlineExceededError("not found")).To(BeFalse()) + Expect(isContextDeadlineExceededError("permission denied")).To(BeFalse()) + Expect(isContextDeadlineExceededError("invalid input")).To(BeFalse()) + }) + }) +}) + +var _ = Describe("isNetworkErrorRetryable", func() { + It("recognizes server timeout errors", func() { + err := apierrs.NewServerTimeout(schema.GroupResource{}, "test", 1) + Expect(isNetworkErrorRetryable(err)).To(BeTrue()) + }) + + It("recognizes conflict errors", func() { + err := apierrs.NewConflict(schema.GroupResource{}, "test", nil) + Expect(isNetworkErrorRetryable(err)).To(BeTrue()) + }) + + It("recognizes internal errors", func() { + err := apierrs.NewInternalError(fmt.Errorf("test error")) + Expect(isNetworkErrorRetryable(err)).To(BeTrue()) + }) + + It("recognizes context deadline exceeded errors", func() { + err := context.DeadlineExceeded + Expect(isNetworkErrorRetryable(err)).To(BeTrue()) + }) + + It("does not retry on not found errors", func() { + err := apierrs.NewNotFound(schema.GroupResource{}, "test") + Expect(isNetworkErrorRetryable(err)).To(BeFalse()) + }) + + It("does not retry on random errors", func() { + err := errors.New("random error") + Expect(isNetworkErrorRetryable(err)).To(BeFalse()) + }) +}) diff --git a/pkg/reconciler/backup/volumesnapshot/offline.go b/pkg/reconciler/backup/volumesnapshot/offline.go index 826a6ef1be..11944fb8a1 100644 --- a/pkg/reconciler/backup/volumesnapshot/offline.go +++ b/pkg/reconciler/backup/volumesnapshot/offline.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a 
Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/offline_test.go b/pkg/reconciler/backup/volumesnapshot/offline_test.go index ac0175ab7c..dc2fdd7296 100644 --- a/pkg/reconciler/backup/volumesnapshot/offline_test.go +++ b/pkg/reconciler/backup/volumesnapshot/offline_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/online.go b/pkg/reconciler/backup/volumesnapshot/online.go index e1d74097a1..c33aed8c1a 100644 --- a/pkg/reconciler/backup/volumesnapshot/online.go +++ b/pkg/reconciler/backup/volumesnapshot/online.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot @@ -26,14 +29,15 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" ) type onlineExecutor struct { - backupClient webserver.BackupClient + backupClient remote.BackupClient } func newOnlineExecutor() *onlineExecutor { - return &onlineExecutor{backupClient: webserver.NewBackupClient()} + return &onlineExecutor{backupClient: remote.NewClient().Backup()} } func (o *onlineExecutor) finalize( @@ -42,16 +46,20 @@ func (o *onlineExecutor) finalize( backup *apiv1.Backup, targetPod *corev1.Pod, ) (*ctrl.Result, error) { - body, err := o.backupClient.StatusWithErrors(ctx, targetPod) + statusBody, err := o.backupClient.StatusWithErrors(ctx, targetPod) if err != nil { return nil, fmt.Errorf("while getting status while finalizing: %w", err) } - if err := body.EnsureDataIsPresent(); err != nil { + if webserver.IsRetryableError(statusBody.Error) { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + + if err := statusBody.GetError(); err != nil { return nil, err } - status := body.Data + status := statusBody.Data if status.BackupName != backup.Name { return nil, fmt.Errorf("trying to stop backup with name: %s, while reconciling backup with name: %s", status.BackupName, @@ -71,9 +79,19 @@ func (o *onlineExecutor) finalize( switch status.Phase { case webserver.Started: - if err := o.backupClient.Stop(ctx, targetPod, *webserver.NewStopBackupRequest(backup.Name)); err != nil { + res, err := o.backupClient.Stop(ctx, targetPod, *webserver.NewStopBackupRequest(backup.Name)) + if err != nil { return nil, fmt.Errorf("while stopping the backup client: %w", err) } + + if webserver.IsRetryableError(res.Error) { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + + if err := res.GetError(); err != nil { + return nil, err + } + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil case webserver.Closing: return &ctrl.Result{RequeueAfter: time.Second * 5}, nil @@ -94,30 +112,46 @@ func (o *onlineExecutor) prepare( volumeSnapshotConfig := backup.GetVolumeSnapshotConfiguration(*cluster.Spec.Backup.VolumeSnapshot) // Handle hot snapshots - body, err := o.backupClient.StatusWithErrors(ctx, targetPod) + statusBody, err := o.backupClient.StatusWithErrors(ctx, targetPod) if err != nil { return nil, fmt.Errorf("while getting status while preparing: %w", err) } - if err := body.EnsureDataIsPresent(); err != nil { - return nil, err + if webserver.IsRetryableError(statusBody.Error) { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil } - status := body.Data + status := statusBody.Data // if the backupName doesn't match it means we have an old stuck pending backup that we have to force out. 
- if backup.Name != status.BackupName || status.Phase == "" { + if status != nil && (backup.Name != status.BackupName || status.Phase == "") { req := webserver.StartBackupRequest{ ImmediateCheckpoint: volumeSnapshotConfig.OnlineConfiguration.GetImmediateCheckpoint(), WaitForArchive: volumeSnapshotConfig.OnlineConfiguration.GetWaitForArchive(), BackupName: backup.Name, - Force: true, } - if err := o.backupClient.Start(ctx, targetPod, req); err != nil { + res, err := o.backupClient.Start(ctx, targetPod, req) + if err != nil { return nil, fmt.Errorf("while trying to start the backup: %w", err) } + + if webserver.IsRetryableError(res.Error) { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + + if err := res.GetError(); err != nil { + return nil, err + } + return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil } + // If we are here, the status either contains errors + // or the running backup is the desired one. + // Handle the error case first + if err := statusBody.GetError(); err != nil { + return nil, err + } + switch status.Phase { case webserver.Starting: return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil diff --git a/pkg/reconciler/backup/volumesnapshot/online_test.go b/pkg/reconciler/backup/volumesnapshot/online_test.go index a0f70cceaa..d3e61cf4bf 100644 --- a/pkg/reconciler/backup/volumesnapshot/online_test.go +++ b/pkg/reconciler/backup/volumesnapshot/online_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot @@ -51,14 +54,26 @@ func (f *fakeBackupClient) StatusWithErrors( return f.response, f.injectStatusError } -func (f *fakeBackupClient) Start(_ context.Context, _ *corev1.Pod, _ webserver.StartBackupRequest) error { +func (f *fakeBackupClient) Start( + _ context.Context, + _ *corev1.Pod, + _ webserver.StartBackupRequest, +) (*webserver.Response[webserver.BackupResultData], error) { f.startCalled = true - return f.injectStartError + return &webserver.Response[webserver.BackupResultData]{ + Data: &webserver.BackupResultData{}, + }, f.injectStartError } -func (f *fakeBackupClient) Stop(_ context.Context, _ *corev1.Pod, _ webserver.StopBackupRequest) error { +func (f *fakeBackupClient) Stop( + _ context.Context, + _ *corev1.Pod, + _ webserver.StopBackupRequest, +) (*webserver.Response[webserver.BackupResultData], error) { f.stopCalled = true - return f.injectStopError + return &webserver.Response[webserver.BackupResultData]{ + Data: &webserver.BackupResultData{}, + }, f.injectStopError } var _ = Describe("onlineExecutor prepare", func() { diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler.go b/pkg/reconciler/backup/volumesnapshot/reconciler.go index 4d2d633ac4..7900e03359 100644 --- a/pkg/reconciler/backup/volumesnapshot/reconciler.go +++ b/pkg/reconciler/backup/volumesnapshot/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot @@ -35,8 +38,8 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -44,7 +47,7 @@ import ( type Reconciler struct { cli client.Client recorder record.EventRecorder - instanceStatusClient instance.Client + instanceStatusClient remote.InstanceClient } // ExecutorBuilder is a struct capable of creating a Reconciler @@ -61,7 +64,7 @@ func NewReconcilerBuilder( executor: Reconciler{ cli: cli, recorder: recorder, - instanceStatusClient: instance.NewStatusClient(), + instanceStatusClient: remote.NewClient().Instance(), }, } } @@ -96,11 +99,11 @@ func (se *Reconciler) enrichSnapshot( if data, err := se.instanceStatusClient.GetPgControlDataFromInstance(ctx, targetPod); err == nil { vs.Annotations[utils.PgControldataAnnotationName] = data pgControlData := utils.ParsePgControldataOutput(data) - timelineID, ok := pgControlData[utils.PgControlDataKeyLatestCheckpointTimelineID] + timelineID, ok := pgControlData.TryGetLatestCheckpointTimelineID() if ok { vs.Labels[utils.BackupTimelineLabelName] = timelineID } - startWal, ok := pgControlData[utils.PgControlDataKeyREDOWALFile] + startWal, ok := pgControlData.TryGetREDOWALFile() if ok { vs.Annotations[utils.BackupStartWALAnnotationName] = startWal // TODO: once we have online volumesnapshot backups, this should change @@ -155,6 +158,24 @@ func (se *Reconciler) Reconcile( backup *apiv1.Backup, targetPod *corev1.Pod, pvcs []corev1.PersistentVolumeClaim, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx).WithName("volumesnapshot_reconciler") + + res, err := se.internalReconcile(ctx, cluster, backup, targetPod, pvcs) + if isNetworkErrorRetryable(err) { + contextLogger.Error(err, "detected retryable error while executing snapshot backup, retrying...") + return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + + return res, err +} + +func (se *Reconciler) internalReconcile( + ctx context.Context, + cluster *apiv1.Cluster, + backup *apiv1.Backup, + targetPod *corev1.Pod, + pvcs []corev1.PersistentVolumeClaim, ) (*ctrl.Result, error) { if cluster.Spec.Backup == nil || cluster.Spec.Backup.VolumeSnapshot == nil { return nil, fmt.Errorf("cannot execute a VolumeSnapshot on a cluster without configuration") @@ -190,7 +211,7 @@ func (se *Reconciler) Reconcile( } // Step 3: wait for snapshots to be provisioned - if res, err := se.waitSnapshotToBeProvisionedStep(ctx, volumeSnapshots); res != nil || err != nil { + if res, err := se.waitSnapshotToBeProvisionedStep(ctx, backup, volumeSnapshots); res != nil || err != nil { return res, err } @@ -207,7 +228,7 @@ func (se *Reconciler) Reconcile( } // Step 5: wait for snapshots to be ready to use - if res, err := se.waitSnapshotToBeReadyStep(ctx, volumeSnapshots); res != nil || err != nil { + 
if res, err := se.waitSnapshotToBeReadyStep(ctx, backup, volumeSnapshots); res != nil || err != nil { return res, err } @@ -385,10 +406,11 @@ func (se *Reconciler) createSnapshotPVCGroupStep( // waitSnapshotToBeProvisionedStep waits for every PVC snapshot to be claimed func (se *Reconciler) waitSnapshotToBeProvisionedStep( ctx context.Context, + backup *apiv1.Backup, snapshots []storagesnapshotv1.VolumeSnapshot, ) (*ctrl.Result, error) { for i := range snapshots { - if res, err := se.waitSnapshotToBeProvisionedAndAnnotate(ctx, &snapshots[i]); res != nil || err != nil { + if res, err := se.waitSnapshotToBeProvisionedAndAnnotate(ctx, backup, &snapshots[i]); res != nil || err != nil { return res, err } } @@ -399,10 +421,11 @@ func (se *Reconciler) waitSnapshotToBeProvisionedStep( // waitSnapshotToBeReadyStep waits for every PVC snapshot to be ready to use func (se *Reconciler) waitSnapshotToBeReadyStep( ctx context.Context, + backup *apiv1.Backup, snapshots []storagesnapshotv1.VolumeSnapshot, ) (*ctrl.Result, error) { for i := range snapshots { - if res, err := se.waitSnapshotToBeReady(ctx, &snapshots[i]); res != nil || err != nil { + if res, err := se.waitSnapshotToBeReady(ctx, backup, &snapshots[i]); res != nil || err != nil { return res, err } } @@ -501,20 +524,14 @@ func transferLabelsToAnnotations(labels map[string]string, annotations map[strin // SnapshotStartTimeAnnotationName and SnapshotEndTimeAnnotationName. func (se *Reconciler) waitSnapshotToBeProvisionedAndAnnotate( ctx context.Context, + backup *apiv1.Backup, snapshot *storagesnapshotv1.VolumeSnapshot, ) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx) info := parseVolumeSnapshotInfo(snapshot) if info.error != nil { - if info.error.isRetryable() { - contextLogger.Error(info.error, - "Retryable snapshot provisioning error, trying again", - "volumeSnapshotName", snapshot.Name) - return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } - - return nil, info.error + return se.handleSnapshotErrors(ctx, backup, info.error) } if !info.provisioned { contextLogger.Info( @@ -544,20 +561,14 @@ func (se *Reconciler) waitSnapshotToBeProvisionedAndAnnotate( // SnapshotStartTimeAnnotationName and SnapshotEndTimeAnnotationName. func (se *Reconciler) waitSnapshotToBeReady( ctx context.Context, + backup *apiv1.Backup, snapshot *storagesnapshotv1.VolumeSnapshot, ) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx) info := parseVolumeSnapshotInfo(snapshot) if info.error != nil { - if info.error.isRetryable() { - contextLogger.Error(info.error, - "Retryable snapshot provisioning error, trying again", - "volumeSnapshotName", snapshot.Name) - return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } - - return nil, info.error + return se.handleSnapshotErrors(ctx, backup, info.error) } if !info.ready { contextLogger.Info( @@ -570,3 +581,89 @@ func (se *Reconciler) waitSnapshotToBeReady( return nil, nil } + +func (se *Reconciler) handleSnapshotErrors( + ctx context.Context, + backup *apiv1.Backup, + snapshotErr *volumeSnapshotError, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx). 
+ WithName("handle_snapshot_errors") + + if !snapshotErr.isRetryable() { + return nil, snapshotErr + } + + if err := addDeadlineStatus(ctx, se.cli, backup); err != nil { + return nil, fmt.Errorf("while adding deadline status: %w", err) + } + + exceeded, err := isDeadlineExceeded(backup) + if err != nil { + return nil, fmt.Errorf("while checking if deadline was exceeded: %w", err) + } + if exceeded { + return nil, fmt.Errorf("deadline exceeded for error %w", snapshotErr) + } + + contextLogger.Error(snapshotErr, + "Retryable snapshot provisioning error, trying again", + ) + return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil +} + +func isDeadlineExceeded(backup *apiv1.Backup) (bool, error) { + if backup.Status.PluginMetadata[pluginName] == "" { + return false, fmt.Errorf("no plugin metadata found in backup status") + } + + data, err := unmarshalMetadata(backup.Status.PluginMetadata[pluginName]) + if err != nil { + return false, fmt.Errorf("while unmarshalling plugin metadata: %w", err) + } + + // if the deadline have passed since firstFailureTime we need to consider the deadline exceeded + deadline := int64(backup.GetVolumeSnapshotDeadline().Seconds()) + return time.Now().Unix()-data.VolumeSnapshotFirstDetectedFailure > deadline, nil +} + +type metadata struct { + // VolumeSnapshotFirstDetectedFailure is UNIX the timestamp when the first volume snapshot failure was detected + VolumeSnapshotFirstDetectedFailure int64 `json:"volumeSnapshotFirstFailure,omitempty"` +} + +func unmarshalMetadata(rawData string) (*metadata, error) { + var data metadata + if err := json.Unmarshal([]byte(rawData), &data); err != nil { + return nil, fmt.Errorf("while unmarshalling metadata: %w", err) + } + + if data.VolumeSnapshotFirstDetectedFailure == 0 { + return nil, fmt.Errorf("no volumeSnapshotFirstFailure found in plugin metadata: %s", pluginName) + } + + return &data, nil +} + +func addDeadlineStatus(ctx context.Context, cli client.Client, backup *apiv1.Backup) error { + if value, ok := backup.Status.PluginMetadata[pluginName]; ok { + if _, err := unmarshalMetadata(value); err == nil { + return nil + } + } + + data := &metadata{VolumeSnapshotFirstDetectedFailure: time.Now().Unix()} + rawData, err := json.Marshal(data) + if err != nil { + return err + } + + if backup.Status.PluginMetadata == nil { + backup.Status.PluginMetadata = map[string]string{} + } + + origBackup := backup.DeepCopy() + backup.Status.PluginMetadata[pluginName] = string(rawData) + + return cli.Status().Patch(ctx, backup, client.MergeFrom(origBackup)) +} diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go index 8ce7adc393..31165768f2 100644 --- a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go +++ b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go index 8ce7adc393..31165768f2 100644 --- a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go +++ b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go
@@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
@@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot import ( "context" + "encoding/json" "fmt" "time"
@@ -451,3 +455,137 @@ var _ = Describe("annotateSnapshotsWithBackupData", func() { } }) }) + +var _ = Describe("addDeadlineStatus", func() { + var ( + ctx context.Context + backup *apiv1.Backup + cli k8client.Client + ) + + BeforeEach(func() { + ctx = context.TODO() + backup = &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-backup", + }, + Status: apiv1.BackupStatus{ + PluginMetadata: make(map[string]string), + }, + } + cli = fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()). + WithObjects(backup). + WithStatusSubresource(&apiv1.Backup{}). + Build() + }) + + It("should add deadline status if not present", func() { + err := addDeadlineStatus(ctx, cli, backup) + Expect(err).ToNot(HaveOccurred()) + + var updatedBackup apiv1.Backup + err = cli.Get(ctx, types.NamespacedName{Name: backup.Name, Namespace: backup.Namespace}, &updatedBackup) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedBackup.Status.PluginMetadata).To(HaveKey(pluginName)) + Expect(updatedBackup.Status.PluginMetadata[pluginName]).ToNot(BeEmpty()) + Expect(updatedBackup.Status.PluginMetadata[pluginName]).To(MatchRegexp(`{"volumeSnapshotFirstFailure":\d+}`)) + }) + + It("should not modify deadline status if already present", func() { + backup.Status.PluginMetadata[pluginName] = `{"volumeSnapshotFirstFailure": 1234567890}` + err := cli.Status().Update(ctx, backup) + Expect(err).ToNot(HaveOccurred()) + + err = addDeadlineStatus(ctx, cli, backup) + Expect(err).ToNot(HaveOccurred()) + + var updatedBackup apiv1.Backup + err = cli.Get(ctx, types.NamespacedName{Name: backup.Name, Namespace: backup.Namespace}, &updatedBackup) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedBackup.Status.PluginMetadata[pluginName]).To(Equal(`{"volumeSnapshotFirstFailure": 1234567890}`)) + }) +}) + +var _ = Describe("isDeadlineExceeded", func() { + var backup *apiv1.Backup + + BeforeEach(func() { + backup = &apiv1.Backup{ + Status: apiv1.BackupStatus{ + PluginMetadata: make(map[string]string), + }, + } + }) + + It("should return an error if plugin metadata is empty", func() { + _, err := isDeadlineExceeded(backup) + Expect(err).To(HaveOccurred()) + }) + + It("should return error if unmarshalling fails", func() { + backup.Status.PluginMetadata[pluginName] = "invalid-json" + exceeded, err := isDeadlineExceeded(backup) + Expect(err).To(HaveOccurred()) + Expect(exceeded).To(BeFalse()) + }) + + It("should return error if no volumeSnapshotFirstFailure found in plugin metadata", func() { + backup.Status.PluginMetadata[pluginName] = `{}` + exceeded, err := isDeadlineExceeded(backup) + Expect(err).To(HaveOccurred()) + Expect(exceeded).To(BeFalse()) + }) + + It("should return false if deadline has not been exceeded", func() { + data := metadata{VolumeSnapshotFirstDetectedFailure: time.Now().Unix()} + rawData, _ := json.Marshal(data) + backup.Status.PluginMetadata[pluginName] = string(rawData) + backup.Annotations = map[string]string{utils.BackupVolumeSnapshotDeadlineAnnotationName: "10"} + + exceeded, err := isDeadlineExceeded(backup) + Expect(err).ToNot(HaveOccurred()) + Expect(exceeded).To(BeFalse()) + }) + + It("should return true if deadline has been exceeded", func() { + data := metadata{VolumeSnapshotFirstDetectedFailure: time.Now().Add(-20 * time.Minute).Unix()} + rawData, _ := json.Marshal(data) + backup.Status.PluginMetadata[pluginName] = string(rawData) + backup.Annotations =
map[string]string{utils.BackupVolumeSnapshotDeadlineAnnotationName: "10"} + + exceeded, err := isDeadlineExceeded(backup) + Expect(err).ToNot(HaveOccurred()) + Expect(exceeded).To(BeTrue()) + }) +}) + +var _ = Describe("unmarshalMetadata", func() { + It("should unmarshal valid metadata correctly", func() { + rawData := `{"volumeSnapshotFirstFailure": 1234567890}` + data, err := unmarshalMetadata(rawData) + Expect(err).ToNot(HaveOccurred()) + Expect(data.VolumeSnapshotFirstDetectedFailure).To(Equal(int64(1234567890))) + }) + + It("should return an error if rawData is invalid JSON", func() { + rawData := `invalid-json` + data, err := unmarshalMetadata(rawData) + Expect(err).To(HaveOccurred()) + Expect(data).To(BeNil()) + }) + + It("should return an error if volumeSnapshotFirstFailure is missing", func() { + rawData := `{}` + data, err := unmarshalMetadata(rawData) + Expect(err).To(HaveOccurred()) + Expect(data).To(BeNil()) + }) + + It("should return an error if volumeSnapshotFirstFailure is zero", func() { + rawData := `{"volumeSnapshotFirstFailure": 0}` + data, err := unmarshalMetadata(rawData) + Expect(err).To(HaveOccurred()) + Expect(data).To(BeNil()) + }) +}) diff --git a/pkg/reconciler/backup/volumesnapshot/resources.go b/pkg/reconciler/backup/volumesnapshot/resources.go index 5e8231f94e..04a4aa761b 100644 --- a/pkg/reconciler/backup/volumesnapshot/resources.go +++ b/pkg/reconciler/backup/volumesnapshot/resources.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot @@ -19,7 +22,6 @@ package volumesnapshot import ( "context" "fmt" - "strings" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -27,6 +29,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) +const pluginName = "cnpg_volumesnapshot" + // volumeSnapshotInfo host information about a volume snapshot type volumeSnapshotInfo struct { // error contains the raised error when the volume snapshot terminated @@ -69,26 +73,15 @@ func (err volumeSnapshotError) Error() string { // IsRetryable returns true if the external snapshotter controller // will retry taking the snapshot func (err volumeSnapshotError) isRetryable() bool { + // The Kubernetes CSI driver/controller will automatically retry snapshot creation + // for certain errors, including timeouts. We use pattern matching to identify + // these retryable errors and handle them appropriately. + if err.InternalError.Message == nil { return false } - // Obviously this is a heuristic, but unfortunately we don't have - // the information we need. - // We're trying to handle the cases where the external-snapshotter - // controller failed on a conflict with the following error: - // - // > the object has been modified; please apply your changes to the - // > latest version and try again - - // TODO: instead of blindingly retry on matching errors, we - // should enhance our CRD with a configurable deadline. 
After - // the deadline have been met on err.InternalError.CreatedAt - // the backup can be marked as failed - - return strings.Contains( - *err.InternalError.Message, - "the object has been modified") + return isCSIErrorMessageRetriable(*err.InternalError.Message) } // slice represents a slice of []storagesnapshotv1.VolumeSnapshot @@ -112,7 +105,7 @@ func getBackupVolumeSnapshots( ctx context.Context, cli client.Client, namespace string, - backupLabelName string, + backupName string, ) (slice, error) { var list storagesnapshotv1.VolumeSnapshotList @@ -120,7 +113,7 @@ func getBackupVolumeSnapshots( ctx, &list, client.InNamespace(namespace), - client.MatchingLabels{utils.BackupNameLabelName: backupLabelName}, + client.MatchingLabels{utils.BackupNameLabelName: backupName}, ); err != nil { return nil, err } diff --git a/pkg/reconciler/backup/volumesnapshot/resources_test.go b/pkg/reconciler/backup/volumesnapshot/resources_test.go index ea65460e57..b58210ef2d 100644 --- a/pkg/reconciler/backup/volumesnapshot/resources_test.go +++ b/pkg/reconciler/backup/volumesnapshot/resources_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/suite_test.go b/pkg/reconciler/backup/volumesnapshot/suite_test.go index 98ea99c492..1e926187ad 100644 --- a/pkg/reconciler/backup/volumesnapshot/suite_test.go +++ b/pkg/reconciler/backup/volumesnapshot/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/hibernation/doc.go b/pkg/reconciler/hibernation/doc.go index 0e3392f678..2c7906f2a9 100644 --- a/pkg/reconciler/hibernation/doc.go +++ b/pkg/reconciler/hibernation/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package hibernation contains all the logic to hibernate a CNPG cluster diff --git a/pkg/reconciler/hibernation/reconciler.go b/pkg/reconciler/hibernation/reconciler.go index 4265278b2b..0f2cbe43d8 100644 --- a/pkg/reconciler/hibernation/reconciler.go +++ b/pkg/reconciler/hibernation/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation @@ -79,9 +82,12 @@ func reconcileDeletePods( podToBeDeleted = &instances[0] } - // The Pod list is sorted and the primary instance + // The Pod list is sorted, and the primary instance // will always be the first one, if present contextLogger.Info("Deleting Pod as requested by the hibernation procedure", "podName", podToBeDeleted.Name) - deletionResult := c.Delete(ctx, podToBeDeleted) - return &ctrl.Result{RequeueAfter: 5 * time.Second}, deletionResult + if err := c.Delete(ctx, podToBeDeleted); err != nil { + return nil, err + } + + return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil } diff --git a/pkg/reconciler/hibernation/reconciler_test.go b/pkg/reconciler/hibernation/reconciler_test.go index bd27fd90e7..8385e957af 100644 --- a/pkg/reconciler/hibernation/reconciler_test.go +++ b/pkg/reconciler/hibernation/reconciler_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation diff --git a/pkg/reconciler/hibernation/status.go b/pkg/reconciler/hibernation/status.go index 8f16caa2a3..bb07f72bd6 100644 --- a/pkg/reconciler/hibernation/status.go +++ b/pkg/reconciler/hibernation/status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package hibernation diff --git a/pkg/reconciler/hibernation/status_test.go b/pkg/reconciler/hibernation/status_test.go index ba0012451f..9e52b3f864 100644 --- a/pkg/reconciler/hibernation/status_test.go +++ b/pkg/reconciler/hibernation/status_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation @@ -44,7 +47,7 @@ var _ = Describe("Hibernation annotation management", func() { } Expect(isHibernationEnabled(&cluster)).To(BeTrue()) - cluster.ObjectMeta.Annotations[utils.HibernationAnnotationName] = HibernationOff + cluster.Annotations[utils.HibernationAnnotationName] = HibernationOff Expect(isHibernationEnabled(&cluster)).To(BeFalse()) }) }) diff --git a/pkg/reconciler/hibernation/suite_test.go b/pkg/reconciler/hibernation/suite_test.go index eb2cce6cd8..74db02902a 100644 --- a/pkg/reconciler/hibernation/suite_test.go +++ b/pkg/reconciler/hibernation/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation diff --git a/pkg/reconciler/instance/certificate/doc.go b/pkg/reconciler/instance/certificate/doc.go new file mode 100644 index 0000000000..e495312140 --- /dev/null +++ b/pkg/reconciler/instance/certificate/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package certificate contains the reconciler for the PostgreSQL instance manager secrets +package certificate diff --git a/pkg/reconciler/instance/certificate/reconciler.go b/pkg/reconciler/instance/certificate/reconciler.go new file mode 100644 index 0000000000..1a194f3f3e --- /dev/null +++ b/pkg/reconciler/instance/certificate/reconciler.go @@ -0,0 +1,375 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package certificate
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+
+	"github.com/cloudnative-pg/machinery/pkg/fileutils"
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/client-go/util/retry"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
+	postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+)
+
+// Reconciler is a certificate reconciler
+type Reconciler struct {
+	cli                      client.Client
+	serverCertificateHandler serverCertificateHandler
+}
+
+// ErrNoServerCertificateHandler is raised when a new server
+// certificate has been detected by the instance reconciler
+// but no handler has been set.
+var ErrNoServerCertificateHandler = fmt.Errorf("no server certificate handler")
+
+// NewReconciler creates a new certificate reconciler
+func NewReconciler(cli client.Client, serverHandler serverCertificateHandler) *Reconciler {
+	return &Reconciler{
+		cli:                      cli,
+		serverCertificateHandler: serverHandler,
+	}
+}
+
+type serverCertificateHandler interface {
+	SetServerCertificate(certificate *tls.Certificate)
+	GetServerCertificate() *tls.Certificate
+}
+
+// RefreshSecrets is called when the PostgreSQL secrets are changed
+// and will refresh the contents of the files inside the Pod, without
+// reloading the actual PostgreSQL instance.
+//
+// It returns a boolean flag telling whether something changed. Usually
+// the invoker will check that flag and reload the PostgreSQL
+// instance if it is up.
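+//
+// A minimal usage sketch (hypothetical caller; instanceIsUp and the
+// reload helper are assumptions for illustration, not part of this package):
+//
+//	changed, err := reconciler.RefreshSecrets(ctx, cluster)
+//	if err != nil {
+//		return err
+//	}
+//	if changed && instanceIsUp {
+//		err = reloadInstance(ctx) // hypothetical reload helper
+//	}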
+func (r *Reconciler) RefreshSecrets( + ctx context.Context, + cluster *apiv1.Cluster, +) (bool, error) { + type executor func(context.Context, *apiv1.Cluster) (bool, error) + + contextLogger := log.FromContext(ctx) + + var changed bool + + secretRefresher := func(cb executor) error { + localChanged, err := cb(ctx, cluster) + if err == nil { + changed = changed || localChanged + return nil + } + + if !apierrors.IsNotFound(err) { + return err + } + + return nil + } + + if err := secretRefresher(r.refreshServerCertificateFiles); err != nil { + contextLogger.Error(err, "Error while getting server secret") + return changed, err + } + if err := secretRefresher(r.refreshReplicationUserCertificate); err != nil { + contextLogger.Error(err, "Error while getting streaming replication secret") + return changed, err + } + if err := secretRefresher(r.refreshClientCA); err != nil { + contextLogger.Error(err, "Error while getting cluster CA Client secret") + return changed, err + } + if err := secretRefresher(r.refreshServerCA); err != nil { + contextLogger.Error(err, "Error while getting cluster CA Server secret") + return changed, err + } + if err := secretRefresher(r.refreshBarmanEndpointCA); err != nil { + contextLogger.Error(err, "Error while getting barman endpoint CA secret") + return changed, err + } + + return changed, nil +} + +// refreshServerCertificateFiles updates the latest server certificate files +// from the secrets and updates the instance certificate if it is missing or +// outdated. +// It returns true if the configuration has been changed or the instance +// certificate has been updated. +func (r *Reconciler) refreshServerCertificateFiles(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { + contextLogger := log.FromContext(ctx) + + var secret corev1.Secret + + err := retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, + func() error { + err := r.cli.Get( + ctx, + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Status.Certificates.ServerTLSSecret}, + &secret) + if err != nil { + contextLogger.Info("Error accessing server TLS Certificate. Retrying with exponential backoff.", + "secret", cluster.Status.Certificates.ServerTLSSecret) + return err + } + return nil + }) + if err != nil { + return false, err + } + + changed, err := r.refreshCertificateFilesFromSecret( + ctx, + &secret, + postgresSpec.ServerCertificateLocation, + postgresSpec.ServerKeyLocation) + if err != nil { + return changed, err + } + + if r.serverCertificateHandler.GetServerCertificate() == nil || changed { + return changed, r.refreshInstanceCertificateFromSecret(&secret) + } + + return changed, nil +} + +// refreshReplicationUserCertificate updates the latest replication user certificates +// from the secrets and updates the corresponding files. +// It returns true if the configuration has been changed. +func (r *Reconciler) refreshReplicationUserCertificate( + ctx context.Context, + cluster *apiv1.Cluster, +) (bool, error) { + var secret corev1.Secret + err := r.cli.Get( + ctx, + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Status.Certificates.ReplicationTLSSecret}, + &secret) + if err != nil { + return false, err + } + + return r.refreshCertificateFilesFromSecret( + ctx, + &secret, + postgresSpec.StreamingReplicaCertificateLocation, + postgresSpec.StreamingReplicaKeyLocation) +} + +// refreshClientCA updates the latest client CA certificates from the secrets. +// It returns true if the configuration has been changed. 
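+//
+// The referenced Secret is expected to carry the CA bundle under the
+// ca.crt key (certs.CACertKey); an illustrative shape:
+//
+//	apiVersion: v1
+//	kind: Secret
+//	metadata:
+//	  name: cluster-example-ca
+//	data:
+//	  ca.crt: <base64-encoded PEM bundle>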
+func (r *Reconciler) refreshClientCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) {
+	var secret corev1.Secret
+	err := r.cli.Get(
+		ctx,
+		client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Status.Certificates.ClientCASecret},
+		&secret)
+	if err != nil {
+		return false, err
+	}
+
+	return r.refreshCAFromSecret(ctx, &secret, postgresSpec.ClientCACertificateLocation)
+}
+
+// refreshServerCA gets the latest server CA certificates from the secrets.
+// It returns true if the configuration has been changed.
+func (r *Reconciler) refreshServerCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) {
+	var secret corev1.Secret
+	err := r.cli.Get(
+		ctx,
+		client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Status.Certificates.ServerCASecret},
+		&secret)
+	if err != nil {
+		return false, err
+	}
+
+	return r.refreshCAFromSecret(ctx, &secret, postgresSpec.ServerCACertificateLocation)
+}
+
+// refreshBarmanEndpointCA updates the barman endpoint CA certificates from the secrets.
+// It returns true if the configuration has been changed.
+//
+// Important: this function is deprecated and will be replaced by the corresponding
+// feature in the plugin-barman-cloud project
+func (r *Reconciler) refreshBarmanEndpointCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) {
+	// refreshFileFromSecret receives a secret and rewrites the file corresponding to
+	// the key to the provided location. It is implemented as an inner function to
+	// discourage reuse.
+	refreshFileFromSecret := func(
+		secret *corev1.Secret,
+		key, destLocation string,
+	) (bool, error) {
+		contextLogger := log.FromContext(ctx)
+		data, ok := secret.Data[key]
+		if !ok {
+			return false, fmt.Errorf("missing %s entry in Secret", key)
+		}
+
+		changed, err := fileutils.WriteFileAtomic(destLocation, data, 0o600)
+		if err != nil {
+			return false, fmt.Errorf("while writing file: %w", err)
+		}
+
+		if changed {
+			contextLogger.Info("Refreshed configuration file",
+				"filename", destLocation,
+				"secret", secret.Name,
+				"key", key)
+		}
+
+		return changed, nil
+	}
+
+	endpointCAs := map[string]*apiv1.SecretKeySelector{}
+	if cluster.Spec.Backup.IsBarmanEndpointCASet() {
+		endpointCAs[postgresSpec.BarmanBackupEndpointCACertificateLocation] = cluster.Spec.Backup.BarmanObjectStore.EndpointCA
+	}
+	if replicaBarmanCA := cluster.GetBarmanEndpointCAForReplicaCluster(); replicaBarmanCA != nil {
+		endpointCAs[postgresSpec.BarmanRestoreEndpointCACertificateLocation] = replicaBarmanCA
+	}
+	if len(endpointCAs) == 0 {
+		return false, nil
+	}
+
+	var changed bool
+	for target, secretKeySelector := range endpointCAs {
+		var secret corev1.Secret
+		err := r.cli.Get(
+			ctx,
+			client.ObjectKey{Namespace: cluster.Namespace, Name: secretKeySelector.Name},
+			&secret)
+		if err != nil {
+			return false, err
+		}
+		c, err := refreshFileFromSecret(&secret, secretKeySelector.Key, target)
+		changed = changed || c
+		if err != nil {
+			return changed, err
+		}
+	}
+	return changed, nil
+}
+
+// refreshInstanceCertificateFromSecret receives a TLS secret, parses it, and
+// communicates the certificate change event back to the handler.
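+//
+// The Secret is expected to follow the kubernetes.io/tls convention, carrying
+// the certificate under tls.crt (corev1.TLSCertKey) and the key under
+// tls.key (corev1.TLSPrivateKeyKey), as produced for example by
+// pair.GenerateCertificateSecret in the tests below.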
+func (r *Reconciler) refreshInstanceCertificateFromSecret(
+	secret *corev1.Secret,
+) error {
+	if r.serverCertificateHandler == nil {
+		return ErrNoServerCertificateHandler
+	}
+
+	certData, ok := secret.Data[corev1.TLSCertKey]
+	if !ok {
+		return fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey)
+	}
+
+	keyData, ok := secret.Data[corev1.TLSPrivateKeyKey]
+	if !ok {
+		return fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey)
+	}
+
+	certificate, err := tls.X509KeyPair(certData, keyData)
+	if err != nil {
+		return fmt.Errorf("failed decoding Secret: %w", err)
+	}
+
+	r.serverCertificateHandler.SetServerCertificate(&certificate)
+
+	return nil
+}
+
+// refreshCertificateFilesFromSecret receives a secret and rewrites the files
+// corresponding to the server certificate and its private key.
+func (r *Reconciler) refreshCertificateFilesFromSecret(
+	ctx context.Context,
+	secret *corev1.Secret,
+	certificateLocation string,
+	privateKeyLocation string,
+) (bool, error) {
+	contextLogger := log.FromContext(ctx)
+
+	certificate, ok := secret.Data[corev1.TLSCertKey]
+	if !ok {
+		return false, fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey)
+	}
+
+	privateKey, ok := secret.Data[corev1.TLSPrivateKeyKey]
+	if !ok {
+		return false, fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey)
+	}
+
+	certificateIsChanged, err := fileutils.WriteFileAtomic(certificateLocation, certificate, 0o600)
+	if err != nil {
+		return false, fmt.Errorf("while writing server certificate: %w", err)
+	}
+
+	if certificateIsChanged {
+		contextLogger.Info("Refreshed configuration file",
+			"filename", certificateLocation,
+			"secret", secret.Name)
+	}
+
+	privateKeyIsChanged, err := fileutils.WriteFileAtomic(privateKeyLocation, privateKey, 0o600)
+	if err != nil {
+		return false, fmt.Errorf("while writing server private key: %w", err)
+	}
+
+	if privateKeyIsChanged {
+		contextLogger.Info("Refreshed configuration file",
+			"filename", privateKeyLocation,
+			"secret", secret.Name)
+	}
+
+	return certificateIsChanged || privateKeyIsChanged, nil
+}
+
+// refreshCAFromSecret receives a secret and rewrites the `ca.crt` file to the provided location.
+func (r *Reconciler) refreshCAFromSecret(
+	ctx context.Context,
+	secret *corev1.Secret,
+	destLocation string,
+) (bool, error) {
+	caCertificate, ok := secret.Data[certs.CACertKey]
+	if !ok {
+		return false, fmt.Errorf("missing %s entry in Secret", certs.CACertKey)
+	}
+
+	changed, err := fileutils.WriteFileAtomic(destLocation, caCertificate, 0o600)
+	if err != nil {
+		return false, fmt.Errorf("while writing CA certificate: %w", err)
+	}
+
+	if changed {
+		log.FromContext(ctx).Info("Refreshed configuration file",
+			"filename", destLocation,
+			"secret", secret.Name)
+	}
+
+	return changed, nil
+}
diff --git a/pkg/reconciler/instance/certificate/reconciler_test.go b/pkg/reconciler/instance/certificate/reconciler_test.go
new file mode 100644
index 0000000000..9214ad11bb
--- /dev/null
+++ b/pkg/reconciler/instance/certificate/reconciler_test.go
@@ -0,0 +1,178 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package certificate + +import ( + "crypto/tls" + "path" + + "github.com/cloudnative-pg/machinery/pkg/fileutils" + corev1 "k8s.io/api/core/v1" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type fakeServerCertificateHandler struct { + certificate *tls.Certificate +} + +func (f *fakeServerCertificateHandler) SetServerCertificate(certificate *tls.Certificate) { + f.certificate = certificate +} + +func (f *fakeServerCertificateHandler) GetServerCertificate() *tls.Certificate { + return f.certificate +} + +var _ = Describe("refresh certificate files from a secret", func() { + publicKeyContent := []byte("public_key") + privateKeyContent := []byte("private_key") + fakeReconciler := Reconciler{} + fakeSecret := corev1.Secret{ + Data: map[string][]byte{ + corev1.TLSCertKey: publicKeyContent, + corev1.TLSPrivateKeyKey: privateKeyContent, + }, + } + + It("writing the required files into a directory", func(ctx SpecContext) { + tempDir := GinkgoT().TempDir() + certificateLocation := path.Join(tempDir, "tls.crt") + privateKeyLocation := path.Join(tempDir, "tls.key") + + By("having code create new files", func() { + status, err := fakeReconciler.refreshCertificateFilesFromSecret( + ctx, &fakeSecret, certificateLocation, privateKeyLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + + writtenPublicKey, err := fileutils.ReadFile(certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(writtenPublicKey).To(Equal(publicKeyContent)) + + writtenPrivateKey, err := fileutils.ReadFile(privateKeyLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(writtenPrivateKey).To(Equal(privateKeyContent)) + }) + + By("writing again the same data, and verifying that the certificate refresh is not triggered", func() { + status, err := fakeReconciler.refreshCertificateFilesFromSecret( + ctx, &fakeSecret, certificateLocation, privateKeyLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeFalse()) + }) + + By("changing the file contents, and verifying that the certificate refresh is triggered", func() { + newPublicKeyContent := []byte("changed public key") + newPrivateKeyContent := []byte("changed private key") + + changedSecret := fakeSecret.DeepCopy() + changedSecret.Data[corev1.TLSCertKey] = newPublicKeyContent + changedSecret.Data[corev1.TLSPrivateKeyKey] = newPrivateKeyContent + + status, err := fakeReconciler.refreshCertificateFilesFromSecret( + ctx, changedSecret, certificateLocation, privateKeyLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + }) + }) +}) + +var _ = Describe("refresh CA from a secret", func() { + publicKeyContent := []byte("public_key") + fakeReconciler := Reconciler{} + fakeSecret := corev1.Secret{ + Data: map[string][]byte{ + certs.CACertKey: publicKeyContent, + }, + } + + It("writing the required files into a directory", func(ctx SpecContext) { + tempDir := GinkgoT().TempDir() + certificateLocation := path.Join(tempDir, "ca.crt") + + By("having code create new files", func() { + status, err := 
fakeReconciler.refreshCAFromSecret( + ctx, &fakeSecret, certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + + writtenPublicKey, err := fileutils.ReadFile(certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(writtenPublicKey).To(Equal(publicKeyContent)) + }) + + By("writing again the same data, and verifying that the certificate refresh is not triggered", func() { + status, err := fakeReconciler.refreshCAFromSecret( + ctx, &fakeSecret, certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeFalse()) + }) + + By("changing the file contents, and verifying that the certificate refresh is triggered", func() { + newPublicKeyContent := []byte("changed public key") + + changedSecret := fakeSecret.DeepCopy() + changedSecret.Data[certs.CACertKey] = newPublicKeyContent + + status, err := fakeReconciler.refreshCAFromSecret( + ctx, changedSecret, certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + }) + }) +}) + +var _ = Describe("server certificate refresh handler", func() { + It("refresh the server certificate", func() { + var secret *corev1.Secret + + By("creating a new root CA", func() { + root, err := certs.CreateRootCA("common-name", "organization-unit") + Expect(err).ToNot(HaveOccurred()) + + pair, err := root.CreateAndSignPair("host", certs.CertTypeServer, nil) + Expect(err).ToNot(HaveOccurred()) + + secret = pair.GenerateCertificateSecret("default", "pair") + }) + + By("triggering the certificate refresh when no handler is set", func() { + fakeReconciler := Reconciler{} + err := fakeReconciler.refreshInstanceCertificateFromSecret(secret) + Expect(err).Error().Should(Equal(ErrNoServerCertificateHandler)) + }) + + By("triggering the certificate refresh when a handler is set", func() { + fakeReconciler := Reconciler{ + serverCertificateHandler: &fakeServerCertificateHandler{}, + } + + err := fakeReconciler.refreshInstanceCertificateFromSecret(secret) + Expect(err).ShouldNot(HaveOccurred()) + + cert := fakeReconciler.serverCertificateHandler.GetServerCertificate() + Expect(cert).ToNot(BeNil()) + }) + }) +}) diff --git a/pkg/reconciler/instance/certificate/suite_test.go b/pkg/reconciler/instance/certificate/suite_test.go new file mode 100644 index 0000000000..4337e69e53 --- /dev/null +++ b/pkg/reconciler/instance/certificate/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package certificate + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestCertificate(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Certificate Reconciler") +} diff --git a/pkg/reconciler/instance/doc.go b/pkg/reconciler/instance/doc.go index b410b4211b..e32da09ac2 100644 --- a/pkg/reconciler/instance/doc.go +++ b/pkg/reconciler/instance/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package instance contains all the logic to reconcile an instance pod diff --git a/pkg/reconciler/instance/metadata.go b/pkg/reconciler/instance/metadata.go index dae9f9b311..08a97b7346 100644 --- a/pkg/reconciler/instance/metadata.go +++ b/pkg/reconciler/instance/metadata.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package instance @@ -184,11 +187,11 @@ func updateRoleLabels( // it is important to note that even if utils.ClusterRoleLabelName is deprecated, // we still ensure that the values are aligned between the two fields - podRole, hasRole := instance.ObjectMeta.Labels[utils.ClusterRoleLabelName] - newPodRole, newHasRole := instance.ObjectMeta.Labels[utils.ClusterInstanceRoleLabelName] + podRole, hasRole := instance.Labels[utils.ClusterRoleLabelName] + newPodRole, newHasRole := instance.Labels[utils.ClusterInstanceRoleLabelName] - switch { - case instance.Name == cluster.Status.CurrentPrimary: + switch instance.Name { + case cluster.Status.CurrentPrimary: if !hasRole || podRole != specs.ClusterRoleLabelPrimary || !newHasRole || newPodRole != specs.ClusterRoleLabelPrimary { contextLogger.Info("Setting primary label", "pod", instance.Name) diff --git a/pkg/reconciler/instance/metadata_test.go b/pkg/reconciler/instance/metadata_test.go index 8b99f3753e..23a469e737 100644 --- a/pkg/reconciler/instance/metadata_test.go +++ b/pkg/reconciler/instance/metadata_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package instance @@ -27,6 +30,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -395,8 +399,11 @@ var _ = Describe("object metadata test", func() { It("Should not change annotations if they already match the cluster's", func() { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Annotations: map[string]string{key: value}, + Name: "pod1", + Annotations: map[string]string{ + key: value, + utils.OperatorVersionAnnotationName: versions.Version, + }, }, } @@ -409,12 +416,16 @@ var _ = Describe("object metadata test", func() { }, } - Expect(cluster.Spec.InheritedMetadata.Annotations).To(Equal(cluster.GetFixedInheritedAnnotations())) + expectedAnnotations := cluster.GetFixedInheritedAnnotations() + expectedAnnotations[utils.OperatorVersionAnnotationName] = versions.Version + + Expect(expectedAnnotations).To(Equal(cluster.GetFixedInheritedAnnotations())) updated := updateClusterAnnotations(context.Background(), cluster, pod) Expect(updated).To(BeFalse()) - Expect(pod.Annotations).To(HaveLen(1)) + Expect(pod.Annotations).To(HaveLen(2)) Expect(pod.Annotations[key]).To(Equal(value)) + Expect(pod.Annotations[utils.OperatorVersionAnnotationName]).To(Equal(versions.Version)) }) It("Should correctly add AppArmor annotations if present in the cluster's annotations", func() { @@ -445,13 +456,16 @@ var _ = Describe("object metadata test", func() { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", + Annotations: map[string]string{ + utils.OperatorVersionAnnotationName: versions.Version, + }, }, } cluster := &apiv1.Cluster{} updated := updateClusterAnnotations(context.Background(), cluster, pod) Expect(updated).To(BeFalse()) - Expect(pod.Annotations).To(BeEmpty()) + Expect(pod.Annotations).To(HaveLen(1)) }) }) }) @@ -564,7 +578,7 @@ var _ = Describe("metadata update functions", func() { It("Should updateClusterAnnotations correctly", func() { modified := updateClusterAnnotations(ctx, cluster, instance) Expect(modified).To(BeTrue()) - Expect(instance.Annotations).To(Equal(cluster.Spec.InheritedMetadata.Annotations)) + Expect(instance.Annotations).To(Equal(cluster.GetFixedInheritedAnnotations())) }) }) }) diff --git a/pkg/management/postgres/readiness/doc.go b/pkg/reconciler/instance/storage/doc.go similarity index 71% rename from pkg/management/postgres/readiness/doc.go rename to pkg/reconciler/instance/storage/doc.go index b9af9c89f2..276a132db7 100644 --- a/pkg/management/postgres/readiness/doc.go +++ b/pkg/reconciler/instance/storage/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,8 +13,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -// Package readiness contains the code needed to check -// if PostgreSQL is ready to accept client connections. 
-package readiness
+// Package storage contains the instance storage reconcilers
+package storage
diff --git a/pkg/reconciler/instance/storage/reconciler.go b/pkg/reconciler/instance/storage/reconciler.go
new file mode 100644
index 0000000000..b385c80fcd
--- /dev/null
+++ b/pkg/reconciler/instance/storage/reconciler.go
@@ -0,0 +1,92 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package storage
+
+import (
+	"context"
+	"io/fs"
+	"os"
+
+	"github.com/cloudnative-pg/machinery/pkg/fileutils"
+	"github.com/cloudnative-pg/machinery/pkg/log"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+)
+
+type walDirectoryReconcilerOptions struct {
+	// pgWalDirectory is the directory where PostgreSQL will look for WALs.
+	// This is usually $PGDATA/pg_wal, and will be a symbolic link pointing
+	// to the separate WAL storage if configured.
+	pgWalDirectory string
+
+	// walVolumeDirectory is the directory where the WAL volume is mounted.
+	walVolumeDirectory string
+
+	// walVolumeWalDirectory is the directory where the WALs should be stored.
+	// This is usually inside walVolumeDirectory.
+	walVolumeWalDirectory string
+}
+
+// ReconcileWalDirectory ensures that the `pg_wal` directory is moved to the attached volume (if present)
+// and creates a symbolic link pointing to the new location.
+func ReconcileWalDirectory(ctx context.Context) error {
+	return internalReconcileWalDirectory(ctx, walDirectoryReconcilerOptions{
+		pgWalDirectory:        specs.PgWalPath,
+		walVolumeDirectory:    specs.PgWalVolumePath,
+		walVolumeWalDirectory: specs.PgWalVolumePgWalPath,
+	})
+}
+
+// internalReconcileWalDirectory implements ReconcileWalDirectory; the directories
+// are parameterized so that unit tests can exercise the logic directly.
+func internalReconcileWalDirectory(ctx context.Context, opts walDirectoryReconcilerOptions) error {
+	contextLogger := log.FromContext(ctx)
+
+	// Important: for now walStorage cannot be disabled once configured
+	if walVolumeExists, err := fileutils.FileExists(opts.walVolumeDirectory); err != nil {
+		return err
+	} else if !walVolumeExists {
+		return nil
+	}
+
+	// Check if `pg_wal` is already a symbolic link; if so, no further action is needed.
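+	// The end state this function converges to looks like the following
+	// (illustrative paths, depending on the configured mount points):
+	//
+	//	$PGDATA/pg_wal -> <wal-volume-mount>/pg_wal
+	//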
+ pgWalDirInfo, err := os.Lstat(opts.pgWalDirectory) + if err != nil { + return err + } + if pgWalDirInfo.Mode().Type() == fs.ModeSymlink { + return nil + } + + contextLogger.Info("Moving data", "from", opts.pgWalDirectory, "to", opts.walVolumeWalDirectory) + if err := fileutils.MoveDirectoryContent(opts.pgWalDirectory, opts.walVolumeWalDirectory); err != nil { + contextLogger.Error(err, "Moving data", "from", opts.pgWalDirectory, "to", + opts.walVolumeWalDirectory) + return err + } + + contextLogger.Debug("Deleting old path", "path", opts.pgWalDirectory) + if err := fileutils.RemoveFile(opts.pgWalDirectory); err != nil { + contextLogger.Error(err, "Deleting old path", "path", opts.pgWalDirectory) + return err + } + + contextLogger.Debug("Creating symlink", "from", opts.pgWalDirectory, "to", opts.walVolumeWalDirectory) + return os.Symlink(opts.walVolumeWalDirectory, opts.pgWalDirectory) +} diff --git a/pkg/reconciler/instance/storage/reconciler_test.go b/pkg/reconciler/instance/storage/reconciler_test.go new file mode 100644 index 0000000000..41e17f143a --- /dev/null +++ b/pkg/reconciler/instance/storage/reconciler_test.go @@ -0,0 +1,112 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package storage + +import ( + "io/fs" + "os" + "path" + + "github.com/cloudnative-pg/machinery/pkg/fileutils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("WAL Storage reconciler", func() { + var pgDataDir string + var separateWALVolumeDir string + var separateWALVolumeWALDir string + var opts walDirectoryReconcilerOptions + + BeforeEach(func() { + tempDir := GinkgoT().TempDir() + pgDataDir = path.Join(tempDir, "pg_data") + separateWALVolumeDir = path.Join(tempDir, "separate_wal") + separateWALVolumeWALDir = path.Join(tempDir, "separate_wal", "pg_wal") + + opts = walDirectoryReconcilerOptions{ + pgWalDirectory: path.Join(pgDataDir, "pg_wal"), + walVolumeDirectory: separateWALVolumeDir, + walVolumeWalDirectory: separateWALVolumeWALDir, + } + }) + + It("will not error out if a separate WAL storage doesn't exist", func(ctx SpecContext) { + err := internalReconcileWalDirectory(ctx, opts) + Expect(err).ToNot(HaveOccurred()) + }) + + It("won't change anything if pg_wal is already a symlink", func(ctx SpecContext) { + err := fileutils.EnsureDirectoryExists(pgDataDir) + Expect(err).ToNot(HaveOccurred()) + + err = fileutils.EnsureDirectoryExists(separateWALVolumeDir) + Expect(err).ToNot(HaveOccurred()) + + err = os.Symlink(separateWALVolumeDir, opts.pgWalDirectory) + Expect(err).ToNot(HaveOccurred()) + + err = internalReconcileWalDirectory(ctx, opts) + Expect(err).ToNot(HaveOccurred()) + }) + + It("moves the existing WALs to the target volume", func(ctx SpecContext) { + wal1 := path.Join(opts.pgWalDirectory, "000000010000000100000001") + wal2 := path.Join(opts.pgWalDirectory, "000000010000000100000002") + wal3 := path.Join(opts.pgWalDirectory, "000000010000000100000003") + + By("creating a pg_wal directory and a separate WAL volume directory", func() { + err := fileutils.EnsureDirectoryExists(opts.pgWalDirectory) + Expect(err).ToNot(HaveOccurred()) + + err = fileutils.EnsureDirectoryExists(separateWALVolumeDir) + Expect(err).ToNot(HaveOccurred()) + }) + + By("creating a few WALs file in pg_wal", func() { + _, err := fileutils.WriteStringToFile(wal1, "wal content") + Expect(err).ToNot(HaveOccurred()) + + _, err = fileutils.WriteStringToFile(wal2, "wal content") + Expect(err).ToNot(HaveOccurred()) + + _, err = fileutils.WriteStringToFile(wal3, "wal content") + Expect(err).ToNot(HaveOccurred()) + }) + + By("reconciling the WALs to the target volume", func() { + err := internalReconcileWalDirectory(ctx, opts) + Expect(err).ToNot(HaveOccurred()) + }) + + By("checking if pg_wal is a symlink", func() { + pgWalDirInfo, err := os.Lstat(opts.pgWalDirectory) + Expect(err).ToNot(HaveOccurred()) + Expect(pgWalDirInfo.Mode().Type()).To(Equal(fs.ModeSymlink)) + }) + + By("checking the WAL files are in the target volume", func() { + Expect(fileutils.FileExists(wal1)).To(BeTrue()) + Expect(fileutils.FileExists(wal2)).To(BeTrue()) + Expect(fileutils.FileExists(wal3)).To(BeTrue()) + }) + }) +}) diff --git a/pkg/reconciler/instance/storage/suite_test.go b/pkg/reconciler/instance/storage/suite_test.go new file mode 100644 index 0000000000..764c55395f --- /dev/null +++ b/pkg/reconciler/instance/storage/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package storage + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestStorage(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Storage Reconciler") +} diff --git a/pkg/reconciler/instance/suite_test.go b/pkg/reconciler/instance/suite_test.go index c2e88914f6..5d3ff5a753 100644 --- a/pkg/reconciler/instance/suite_test.go +++ b/pkg/reconciler/instance/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package instance diff --git a/pkg/reconciler/majorupgrade/doc.go b/pkg/reconciler/majorupgrade/doc.go new file mode 100644 index 0000000000..f75d3082f2 --- /dev/null +++ b/pkg/reconciler/majorupgrade/doc.go @@ -0,0 +1,30 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package majorupgrade provides the logic for upgrading a PostgreSQL cluster +// to a new major version. +// +// The upgrade process consists of the following steps: +// +// 1. Delete all Pods in the cluster. +// 2. Create and initiate the major upgrade job. +// 3. Wait for the job to complete. +// 4. If the upgrade job completes successfully, start new Pods for the upgraded version. +// Otherwise, stop and wait for input by the user. +package majorupgrade diff --git a/pkg/reconciler/majorupgrade/job.go b/pkg/reconciler/majorupgrade/job.go new file mode 100644 index 0000000000..0c1ec4811b --- /dev/null +++ b/pkg/reconciler/majorupgrade/job.go @@ -0,0 +1,85 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package majorupgrade + +import ( + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +const jobMajorUpgrade = "major-upgrade" + +// isMajorUpgradeJob tells if the passed Job definition corresponds to +// the job handling the major upgrade +func isMajorUpgradeJob(job *batchv1.Job) bool { + return job.GetLabels()[utils.JobRoleLabelName] == string(jobMajorUpgrade) +} + +// getTargetImageFromMajorUpgradeJob gets the image that is being used as +// target of the major upgrade process. +func getTargetImageFromMajorUpgradeJob(job *batchv1.Job) (string, bool) { + if !isMajorUpgradeJob(job) { + return "", false + } + + for _, container := range job.Spec.Template.Spec.Containers { + if container.Name == string(jobMajorUpgrade) { + return container.Image, true + } + } + + return "", false +} + +// createMajorUpgradeJobDefinition creates a job to upgrade the primary node to a new Postgres major version +func createMajorUpgradeJobDefinition(cluster *apiv1.Cluster, nodeSerial int) *batchv1.Job { + prepareCommand := []string{ + "/controller/manager", + "instance", + "upgrade", + "prepare", + "/controller/old", + } + oldVersionInitContainer := corev1.Container{ + Name: "prepare", + Image: cluster.Status.PGDataImageInfo.Image, + ImagePullPolicy: cluster.Spec.ImagePullPolicy, + Command: prepareCommand, + VolumeMounts: specs.CreatePostgresVolumeMounts(*cluster), + Resources: cluster.Spec.Resources, + SecurityContext: specs.CreateContainerSecurityContext(cluster.GetSeccompProfile()), + } + + majorUpgradeCommand := []string{ + "/controller/manager", + "instance", + "upgrade", + "execute", + "/controller/old/bindir.txt", + } + job := specs.CreatePrimaryJob(*cluster, nodeSerial, jobMajorUpgrade, majorUpgradeCommand) + job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, oldVersionInitContainer) + + return job +} diff --git a/pkg/reconciler/majorupgrade/job_test.go b/pkg/reconciler/majorupgrade/job_test.go new file mode 100644 index 0000000000..329140d5c1 --- /dev/null +++ b/pkg/reconciler/majorupgrade/job_test.go @@ -0,0 +1,75 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package majorupgrade + +import ( + batchv1 "k8s.io/api/batch/v1" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Major upgrade Job generation", func() { + oldImageInfo := &apiv1.ImageInfo{ + Image: "postgres:16", + MajorVersion: 16, + } + newImageName := "postgres:17" + + cluster := apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: newImageName, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + }, + Status: apiv1.ClusterStatus{ + Image: newImageName, + PGDataImageInfo: oldImageInfo.DeepCopy(), + }, + } + + It("creates major upgrade jobs", func() { + majorUpgradeJob := createMajorUpgradeJobDefinition(&cluster, 1) + Expect(majorUpgradeJob).ToNot(BeNil()) + Expect(majorUpgradeJob.Spec.Template.Spec.Containers[0].Image).To(Equal(newImageName)) + }) + + It("is able to discover which target image was used", func() { + majorUpgradeJob := createMajorUpgradeJobDefinition(&cluster, 1) + Expect(majorUpgradeJob).ToNot(BeNil()) + + imgName, found := getTargetImageFromMajorUpgradeJob(majorUpgradeJob) + Expect(found).To(BeTrue()) + Expect(imgName).To(Equal(newImageName)) + }) + + DescribeTable( + "Tells major upgrade jobs apart from jobs of other types", + func(job *batchv1.Job, isMajorUpgrade bool) { + Expect(isMajorUpgradeJob(job)).To(Equal(isMajorUpgrade)) + }, + Entry("initdb jobs are not major upgrades", specs.CreatePrimaryJobViaInitdb(cluster, 1), false), + Entry("major-upgrade jobs are major upgrades", createMajorUpgradeJobDefinition(&cluster, 1), true), + ) +}) diff --git a/pkg/reconciler/majorupgrade/reconciler.go b/pkg/reconciler/majorupgrade/reconciler.go new file mode 100644 index 0000000000..673b7684d2 --- /dev/null +++ b/pkg/reconciler/majorupgrade/reconciler.go @@ -0,0 +1,294 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package majorupgrade + +import ( + "context" + "fmt" + "time" + + "github.com/cloudnative-pg/machinery/pkg/log" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" +) + +// ErrIncoherentMajorUpgradeJob is raised when the major upgrade job +// is missing the target image +var ErrIncoherentMajorUpgradeJob = fmt.Errorf("major upgrade job is missing the target image") + +// ErrNoPrimaryPVCFound is raised when the list of PVCs doesn't +// include any primary instance. +var ErrNoPrimaryPVCFound = fmt.Errorf("no primary PVC found") + +// Reconcile implements the major version upgrade logic. +func Reconcile( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + instances []corev1.Pod, + pvcs []corev1.PersistentVolumeClaim, + jobs []batchv1.Job, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + if majorUpgradeJob := getMajorUpdateJob(jobs); majorUpgradeJob != nil { + return majorVersionUpgradeHandleCompletion(ctx, c, cluster, majorUpgradeJob, pvcs) + } + + requestedMajor, err := cluster.GetPostgresqlMajorVersion() + if err != nil { + contextLogger.Error(err, "Unable to retrieve the requested PostgreSQL version") + return nil, err + } + if cluster.Status.PGDataImageInfo == nil || requestedMajor <= cluster.Status.PGDataImageInfo.MajorVersion { + return nil, nil + } + + primaryNodeSerial, err := getPrimarySerial(pvcs) + if err != nil || primaryNodeSerial == 0 { + contextLogger.Error(err, "Unable to retrieve the primary node serial") + return nil, err + } + + contextLogger.Info("Reconciling in-place major version upgrades", + "primaryNodeSerial", primaryNodeSerial, "requestedMajor", requestedMajor) + + err = registerPhase(ctx, c, cluster, apiv1.PhaseMajorUpgrade, + fmt.Sprintf("Upgrading cluster to major version %v", requestedMajor)) + if err != nil { + return nil, err + } + + if result, err := deleteAllPodsInMajorUpgradePreparation(ctx, c, instances, jobs); err != nil { + contextLogger.Error(err, "Unable to delete pods and jobs in preparation for major upgrade") + return nil, err + } else if result != nil { + return result, err + } + + if result, err := createMajorUpgradeJob(ctx, c, cluster, primaryNodeSerial); err != nil { + contextLogger.Error(err, "Unable to create major upgrade job") + return nil, err + } else if result != nil { + return result, err + } + + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil +} + +func getMajorUpdateJob(items []batchv1.Job) *batchv1.Job { + for _, job := range items { + if isMajorUpgradeJob(&job) { + return &job + } + } + + return nil +} + +func deleteAllPodsInMajorUpgradePreparation( + ctx context.Context, + c client.Client, + instances []corev1.Pod, + jobs []batchv1.Job, +) (*ctrl.Result, error) { + foundSomethingToDelete := false + + for _, pod := range instances { + if pod.GetDeletionTimestamp() != nil { + continue + } + + foundSomethingToDelete = true + if err := c.Delete(ctx, &pod); err != nil { + return nil, err + } 
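+
+		// Pods already being terminated were skipped above; the requeue
+		// issued at the end of this function gives the remaining deletions
+		// time to complete before the upgrade job is created.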
+ } + + for _, job := range jobs { + if job.GetDeletionTimestamp() != nil { + continue + } + + foundSomethingToDelete = true + if err := c.Delete(ctx, &job, &client.DeleteOptions{ + PropagationPolicy: ptr.To(metav1.DeletePropagationForeground), + }); err != nil { + return nil, err + } + } + + if foundSomethingToDelete { + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + return nil, nil +} + +func createMajorUpgradeJob( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + primaryNodeSerial int, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + job := createMajorUpgradeJobDefinition(cluster, primaryNodeSerial) + + if err := ctrl.SetControllerReference(cluster, job, c.Scheme()); err != nil { + contextLogger.Error(err, "Unable to set the owner reference for major upgrade job") + return nil, err + } + + utils.SetOperatorVersion(&job.ObjectMeta, versions.Version) + utils.InheritAnnotations(&job.ObjectMeta, cluster.Annotations, + cluster.GetFixedInheritedAnnotations(), configuration.Current) + utils.InheritAnnotations(&job.Spec.Template.ObjectMeta, cluster.Annotations, + cluster.GetFixedInheritedAnnotations(), configuration.Current) + utils.InheritLabels(&job.ObjectMeta, cluster.Labels, + cluster.GetFixedInheritedLabels(), configuration.Current) + utils.InheritLabels(&job.Spec.Template.ObjectMeta, cluster.Labels, + cluster.GetFixedInheritedLabels(), configuration.Current) + utils.SetInstanceRole(job.Spec.Template.ObjectMeta, specs.ClusterRoleLabelPrimary) + + contextLogger.Info("Creating new major upgrade Job", + "jobName", job.Name, + "primary", true) + + if err := c.Create(ctx, job); err != nil { + if errors.IsAlreadyExists(err) { + // This Job was already created, maybe the cache is stale. + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + return nil, err + } + + return nil, nil +} + +func majorVersionUpgradeHandleCompletion( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + job *batchv1.Job, + pvcs []corev1.PersistentVolumeClaim, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + if !utils.JobHasOneCompletion(*job) { + contextLogger.Info("Major upgrade job not completed.") + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + for _, pvc := range pvcs { + if pvc.GetDeletionTimestamp() != nil { + continue + } + + if specs.IsPrimary(pvc.ObjectMeta) { + continue + } + + if err := c.Delete(ctx, &pvc); err != nil { + // Ignore if NotFound, otherwise report the error + if !errors.IsNotFound(err) { + return nil, err + } + } + } + + jobImage, ok := getTargetImageFromMajorUpgradeJob(job) + if !ok { + return nil, ErrIncoherentMajorUpgradeJob + } + + requestedMajor, err := cluster.GetPostgresqlMajorVersion() + if err != nil { + contextLogger.Error(err, "Unable to retrieve the requested PostgreSQL version") + return nil, err + } + + if err := status.PatchWithOptimisticLock( + ctx, + c, + cluster, + status.SetPGDataImageInfo(&apiv1.ImageInfo{ + Image: jobImage, + MajorVersion: requestedMajor, + }), + ); err != nil { + contextLogger.Error(err, "Unable to update cluster status after major upgrade completed.") + return nil, err + } + + if err := c.Delete(ctx, job, &client.DeleteOptions{ + PropagationPolicy: ptr.To(metav1.DeletePropagationForeground), + }); err != nil { + contextLogger.Error(err, "Unable to delete major upgrade job.") + return nil, err + } + + return &ctrl.Result{Requeue: true}, nil +} + +// registerPhase sets a phase into the cluster +func registerPhase( + ctx 
context.Context,
+	c client.Client,
+	cluster *apiv1.Cluster,
+	phase string,
+	reason string,
+) error {
+	return status.PatchWithOptimisticLock(
+		ctx,
+		c,
+		cluster,
+		status.SetPhase(phase, reason),
+		status.SetClusterReadyCondition,
+	)
+}
+
+// getPrimarySerial tries to obtain the primary serial from a group of PVCs
+func getPrimarySerial(
+	pvcs []corev1.PersistentVolumeClaim,
+) (int, error) {
+	for _, pvc := range pvcs {
+		instanceRole, _ := utils.GetInstanceRole(pvc.Labels)
+		if instanceRole != specs.ClusterRoleLabelPrimary {
+			continue
+		}
+
+		return specs.GetNodeSerial(pvc.ObjectMeta)
+	}
+
+	return 0, ErrNoPrimaryPVCFound
+}
diff --git a/pkg/reconciler/majorupgrade/reconciler_test.go b/pkg/reconciler/majorupgrade/reconciler_test.go
new file mode 100644
index 0000000000..3a307575ab
--- /dev/null
+++ b/pkg/reconciler/majorupgrade/reconciler_test.go
@@ -0,0 +1,232 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package majorupgrade
+
+import (
+	"fmt"
+
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/ptr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Major upgrade job status reconciliation", func() {
+	It("waits until the job has completed", func(ctx SpecContext) {
+		job := buildRunningUpgradeJob()
+		cluster := &apiv1.Cluster{}
+		fakeClient := fake.NewClientBuilder().
+			WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+			WithRuntimeObjects(job, cluster).
+			WithStatusSubresource(cluster).
+			Build()
+
+		result, err := majorVersionUpgradeHandleCompletion(ctx, fakeClient, cluster, job, nil)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(result).ToNot(BeNil())
+
+		// the job has not been deleted
+		Expect(job.ObjectMeta.DeletionTimestamp).To(BeNil())
+	})
+
+	It("deletes the replica PVCs and makes the cluster use the new image", func(ctx SpecContext) {
+		job := buildCompletedUpgradeJob()
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster-example",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageName: "postgres:16",
+			},
+		}
+		pvcs := []corev1.PersistentVolumeClaim{
+			buildPrimaryPVC(1),
+			buildReplicaPVC(2),
+			buildReplicaPVC(3),
+		}
+
+		objects := []runtime.Object{
+			job,
+			cluster,
+		}
+		for i := range pvcs {
+			objects = append(objects, &pvcs[i])
+		}
+		fakeClient := fake.NewClientBuilder().
+			WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithRuntimeObjects(objects...). + WithStatusSubresource(cluster). + Build() + + result, err := majorVersionUpgradeHandleCompletion(ctx, fakeClient, cluster, job, pvcs) + Expect(err).ToNot(HaveOccurred()) + Expect(result).ToNot(BeNil()) + Expect(*result).To(Equal(ctrl.Result{Requeue: true})) + + // the replica PVCs have been deleted + for i := range pvcs { + if !specs.IsPrimary(pvcs[i].ObjectMeta) { + var pvc corev1.PersistentVolumeClaim + err := fakeClient.Get(ctx, client.ObjectKeyFromObject(&pvcs[i]), &pvc) + Expect(err).To(MatchError(errors.IsNotFound, "is not found")) + } + } + + // the upgrade has been marked as done + Expect(cluster.Status.PGDataImageInfo.Image).To(Equal("postgres:16")) + Expect(cluster.Status.PGDataImageInfo.MajorVersion).To(Equal(16)) + + // the job has been deleted + var tempJob batchv1.Job + err = fakeClient.Get(ctx, client.ObjectKeyFromObject(job), &tempJob) + Expect(err).To(MatchError(errors.IsNotFound, "is not found")) + }) +}) + +var _ = Describe("Major upgrade job decoding", func() { + It("is able to find the target image", func() { + job := buildCompletedUpgradeJob() + imageName, ok := getTargetImageFromMajorUpgradeJob(job) + Expect(ok).To(BeTrue()) + Expect(imageName).To(Equal("postgres:16")) + }) +}) + +var _ = Describe("PVC metadata decoding", func() { + It("is able to find the serial number of the primary server", func() { + pvcs := []corev1.PersistentVolumeClaim{ + buildReplicaPVC(1), + buildPrimaryPVC(2), + } + + Expect(getPrimarySerial(pvcs)).To(Equal(2)) + }) + + It("raises an error if no primary PVC is found", func() { + pvcs := []corev1.PersistentVolumeClaim{ + buildReplicaPVC(1), + buildReplicaPVC(2), + } + + Expect(getPrimarySerial(pvcs)).Error().To(BeEquivalentTo(ErrNoPrimaryPVCFound)) + }) + + It("raises an error if the primary PVC has an invalid serial", func() { + pvcs := []corev1.PersistentVolumeClaim{ + buildReplicaPVC(1), + buildInvalidPrimaryPVC(2), + } + + Expect(getPrimarySerial(pvcs)).Error().To(HaveOccurred()) + }) +}) + +func buildPrimaryPVC(serial int) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cluster-example-%d", serial), + Labels: map[string]string{ + utils.ClusterRoleLabelName: specs.ClusterRoleLabelPrimary, + }, + Annotations: map[string]string{ + utils.ClusterSerialAnnotationName: fmt.Sprintf("%v", serial), + }, + }, + } +} + +func buildInvalidPrimaryPVC(serial int) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cluster-example-%d", serial), + Labels: map[string]string{ + utils.ClusterRoleLabelName: specs.ClusterRoleLabelPrimary, + }, + Annotations: map[string]string{ + utils.ClusterSerialAnnotationName: fmt.Sprintf("%v - this is a test", serial), + }, + }, + } +} + +func buildReplicaPVC(serial int) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cluster-example-%d", serial), + Labels: map[string]string{ + utils.ClusterRoleLabelName: specs.ClusterRoleLabelReplica, + }, + Annotations: map[string]string{ + utils.ClusterSerialAnnotationName: fmt.Sprintf("%v", serial), + }, + }, + } +} + +func buildCompletedUpgradeJob() *batchv1.Job { + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example-major-upgrade", + Labels: map[string]string{ + utils.JobRoleLabelName: jobMajorUpgrade, + }, + }, + Spec: batchv1.JobSpec{ + Completions: ptr.To[int32](1), + Template: 
corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: jobMajorUpgrade, + Image: "postgres:16", + }, + }, + }, + }, + }, + Status: batchv1.JobStatus{ + Succeeded: 1, + }, + } +} + +func buildRunningUpgradeJob() *batchv1.Job { + return &batchv1.Job{ + Spec: batchv1.JobSpec{ + Completions: ptr.To[int32](1), + }, + Status: batchv1.JobStatus{ + Succeeded: 0, + }, + } +} diff --git a/pkg/reconciler/majorupgrade/suite_test.go b/pkg/reconciler/majorupgrade/suite_test.go new file mode 100644 index 0000000000..a4f8479de6 --- /dev/null +++ b/pkg/reconciler/majorupgrade/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package majorupgrade + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestMajorUpgrade(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Major upgrade reconciler") +} diff --git a/pkg/reconciler/persistentvolumeclaim/build.go b/pkg/reconciler/persistentvolumeclaim/build.go index d8c2492eb6..a47c3621fe 100644 --- a/pkg/reconciler/persistentvolumeclaim/build.go +++ b/pkg/reconciler/persistentvolumeclaim/build.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/build_test.go b/pkg/reconciler/persistentvolumeclaim/build_test.go index 71f7a6c5cf..a9747ec1ab 100644 --- a/pkg/reconciler/persistentvolumeclaim/build_test.go +++ b/pkg/reconciler/persistentvolumeclaim/build_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/calculator.go b/pkg/reconciler/persistentvolumeclaim/calculator.go index c15dbc1c1d..59920e6947 100644 --- a/pkg/reconciler/persistentvolumeclaim/calculator.go +++ b/pkg/reconciler/persistentvolumeclaim/calculator.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/calculator_test.go b/pkg/reconciler/persistentvolumeclaim/calculator_test.go index 6b06797b1d..68bd494544 100644 --- a/pkg/reconciler/persistentvolumeclaim/calculator_test.go +++ b/pkg/reconciler/persistentvolumeclaim/calculator_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/create.go b/pkg/reconciler/persistentvolumeclaim/create.go index 255a3f06fd..8885b27349 100644 --- a/pkg/reconciler/persistentvolumeclaim/create.go +++ b/pkg/reconciler/persistentvolumeclaim/create.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/create_test.go b/pkg/reconciler/persistentvolumeclaim/create_test.go index 5f8b08bc9e..6dcf250402 100644 --- a/pkg/reconciler/persistentvolumeclaim/create_test.go +++ b/pkg/reconciler/persistentvolumeclaim/create_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/delete.go b/pkg/reconciler/persistentvolumeclaim/delete.go index 9d2e51db6a..36e6f2664c 100644 --- a/pkg/reconciler/persistentvolumeclaim/delete.go +++ b/pkg/reconciler/persistentvolumeclaim/delete.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/delete_test.go b/pkg/reconciler/persistentvolumeclaim/delete_test.go index 204b251e83..e3d51b4437 100644 --- a/pkg/reconciler/persistentvolumeclaim/delete_test.go +++ b/pkg/reconciler/persistentvolumeclaim/delete_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/doc.go b/pkg/reconciler/persistentvolumeclaim/doc.go index 1bf1beecb5..09fe5b8d0d 100644 --- a/pkg/reconciler/persistentvolumeclaim/doc.go +++ b/pkg/reconciler/persistentvolumeclaim/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package persistentvolumeclaim contains all the logic to reconcile and build PVCS diff --git a/pkg/reconciler/persistentvolumeclaim/existing.go b/pkg/reconciler/persistentvolumeclaim/existing.go new file mode 100644 index 0000000000..b104d990c5 --- /dev/null +++ b/pkg/reconciler/persistentvolumeclaim/existing.go @@ -0,0 +1,164 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package persistentvolumeclaim
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/utils/ptr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+type reconciliationUnit func(
+	ctx context.Context,
+	c client.Client,
+	storageConfiguration *apiv1.StorageConfiguration,
+	pvc *corev1.PersistentVolumeClaim,
+) error
+
+// reconcileExistingPVCs aligns the existing PVCs to the desired state
+func reconcileExistingPVCs(
+	ctx context.Context,
+	c client.Client,
+	cluster *apiv1.Cluster,
+	pvcs []corev1.PersistentVolumeClaim,
+) error {
+	if len(pvcs) == 0 {
+		return nil
+	}
+
+	contextLogger := log.FromContext(ctx)
+
+	var reconciliationUnits []reconciliationUnit
+
+	if cluster.ShouldResizeInUseVolumes() {
+		reconciliationUnits = append(reconciliationUnits, reconcilePVCQuantity)
+	}
+	if cluster.Spec.StorageConfiguration.PersistentVolumeClaimTemplate != nil {
+		reconciliationUnits = append(reconciliationUnits, reconcileVolumeAttributeClass)
+	}
+
+	if len(reconciliationUnits) == 0 {
+		return nil
+	}
+
+	for idx := range pvcs {
+		pvc := &pvcs[idx]
+
+		pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels())
+		if err != nil {
+			contextLogger.Error(err,
+				"encountered an error while trying to get pvc role from label",
+				"role", pvc.Labels[utils.PvcRoleLabelName],
+			)
+			return err
+		}
+
+		storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster)
+		if err != nil {
+			contextLogger.Error(err,
+				"encountered an error while trying to obtain the storage configuration",
+				"role", pvc.Labels[utils.PvcRoleLabelName],
+				"pvcName", pvc.Name,
+			)
+			return err
+		}
+
+		for _, reconciler := range reconciliationUnits {
+			if err := reconciler(ctx, c, &storageConfiguration, pvc); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func reconcileVolumeAttributeClass(
+	ctx context.Context,
+	c client.Client,
+	storageConfiguration *apiv1.StorageConfiguration,
+	pvc *corev1.PersistentVolumeClaim,
+) error {
+	if storageConfiguration.PersistentVolumeClaimTemplate == nil {
+		return nil
+	}
+
+	expectedVolumeAttributesClassName := storageConfiguration.PersistentVolumeClaimTemplate.VolumeAttributesClassName
+	// Compare the pointed-to values rather than the pointers themselves:
+	// two distinct pointers to the same class name must not trigger a patch.
+	if ptr.Equal(expectedVolumeAttributesClassName, pvc.Spec.VolumeAttributesClassName) {
+		return nil
+	}
+
+	oldPVC := pvc.DeepCopy()
+	pvc.Spec.VolumeAttributesClassName = expectedVolumeAttributesClassName
+	if err := c.Patch(ctx, pvc, client.MergeFrom(oldPVC)); err != nil {
+		return fmt.Errorf("error while changing PVC volume attributes class name: %w", err)
+	}
+
+	return nil
+}
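// Editor's note: reconcilePVCQuantity below relies on resource.Quantity's
// decimal comparison to pick between the three possible outcomes. A minimal
// self-contained illustration of those branches, assuming only the
// k8s.io/apimachinery/pkg/api/resource package:
//
//	current := resource.MustParse("2Gi")
//	desired := resource.MustParse("1Gi")
//	switch current.AsDec().Cmp(desired.AsDec()) {
//	case 0: // equal: the PVC already requests the desired size, nothing to do
//	case 1: // current > desired: shrinking is refused, only a warning is logged
//	default: // current < desired: the PVC is patched to request the larger size
//	}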
+func reconcilePVCQuantity(
+	ctx context.Context,
+	c client.Client,
+	storageConfiguration *apiv1.StorageConfiguration,
+	pvc *corev1.PersistentVolumeClaim,
+) error {
+	contextLogger := log.FromContext(ctx)
+
+	parsedSize := storageConfiguration.GetSizeOrNil()
+	if parsedSize == nil {
+		return ErrorInvalidSize
+	}
+	currentSize := pvc.Spec.Resources.Requests["storage"]
+
+	switch currentSize.AsDec().Cmp(parsedSize.AsDec()) {
+	case 0:
+		return nil
+	case 1:
+		contextLogger.Warning("cannot decrease storage requirement",
+			"from", currentSize, "to", parsedSize,
+			"pvcName", pvc.Name)
+		return nil
+	}
+
+	oldPVC := pvc.DeepCopy()
+	// The metadata is reconciled by a different set of functions, so there is
+	// no need to do it here.
+	pvc = resources.NewPersistentVolumeClaimBuilderFromPVC(pvc).
+		WithRequests(corev1.ResourceList{"storage": *parsedSize}).
+		Build()
+
+	if err := c.Patch(ctx, pvc, client.MergeFrom(oldPVC)); err != nil {
+		contextLogger.Error(err, "error while changing PVC storage requirement",
+			"pvcName", pvc.Name,
+			"pvc", pvc,
+			"requests", pvc.Spec.Resources.Requests,
+			"oldRequests", oldPVC.Spec.Resources.Requests)
+		return fmt.Errorf("error while changing PVC storage requirement: %w", err)
+	}
+
+	return nil
+}
diff --git a/pkg/reconciler/persistentvolumeclaim/instance.go b/pkg/reconciler/persistentvolumeclaim/instance.go
index 3dd3ad388f..44db15d0a0 100644
--- a/pkg/reconciler/persistentvolumeclaim/instance.go
+++ b/pkg/reconciler/persistentvolumeclaim/instance.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package persistentvolumeclaim
diff --git a/pkg/reconciler/persistentvolumeclaim/metadata.go b/pkg/reconciler/persistentvolumeclaim/metadata.go
index 5106a65b86..01108a4e35 100644
--- a/pkg/reconciler/persistentvolumeclaim/metadata.go
+++ b/pkg/reconciler/persistentvolumeclaim/metadata.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim @@ -87,10 +90,10 @@ func reconcileInstanceRoleLabel( instanceReconciler := metadataReconciler{ name: "instance-role", isUpToDate: func(pvc *corev1.PersistentVolumeClaim) bool { - if pvc.ObjectMeta.Labels[utils.ClusterRoleLabelName] != instanceRole { + if pvc.Labels[utils.ClusterRoleLabelName] != instanceRole { return false } - if pvc.ObjectMeta.Labels[utils.ClusterInstanceRoleLabelName] != instanceRole { + if pvc.Labels[utils.ClusterInstanceRoleLabelName] != instanceRole { return false } diff --git a/pkg/reconciler/persistentvolumeclaim/metadata_test.go b/pkg/reconciler/persistentvolumeclaim/metadata_test.go index 49fa3cfe6e..d57f1072ac 100644 --- a/pkg/reconciler/persistentvolumeclaim/metadata_test.go +++ b/pkg/reconciler/persistentvolumeclaim/metadata_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler.go b/pkg/reconciler/persistentvolumeclaim/reconciler.go index 727fab4be6..1ccbb37fbb 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim @@ -42,7 +45,7 @@ func Reconcile( return res, err } - if err := reconcileResourceRequests(ctx, c, cluster, pvcs); err != nil { + if err := reconcileExistingPVCs(ctx, c, cluster, pvcs); err != nil { if apierrs.IsConflict(err) { contextLogger.Debug("Conflict error while reconciling PVCs", "error", err) return ctrl.Result{Requeue: true}, nil diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go index 4ca794faa8..1fd18cd3f6 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim @@ -33,6 +36,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -157,7 +161,7 @@ var _ = Describe("Reconcile resource requests", func() { cluster := &apiv1.Cluster{} It("Reconcile resources with empty PVCs shouldn't fail", func() { - err := reconcileResourceRequests( + err := reconcileExistingPVCs( context.Background(), cli, cluster, @@ -174,7 +178,7 @@ var _ = Describe("Reconcile resource requests", func() { } cli := fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()).WithObjects(cluster).Build() - err := reconcileResourceRequests( + err := reconcileExistingPVCs( context.Background(), cli, cluster, @@ -184,7 +188,7 @@ var _ = Describe("Reconcile resource requests", func() { }) }) -var _ = Describe("PVC reconciliation", func() { +var _ = Describe("PVC reconciliation", Ordered, func() { const clusterName = "cluster-pvc-reconciliation" fetchPVC := func(cl client.Client, pvcToFetch corev1.PersistentVolumeClaim) corev1.PersistentVolumeClaim { @@ -255,10 +259,11 @@ var _ = Describe("PVC reconciliation", func() { ) Expect(err).ToNot(HaveOccurred()) Expect(pvcs.Items[2].Annotations).To(BeEquivalentTo(map[string]string{ - utils.PVCStatusAnnotationName: "ready", - utils.ClusterSerialAnnotationName: "3", - "annotation1": "value", - "annotation2": "value", + utils.PVCStatusAnnotationName: "ready", + utils.ClusterSerialAnnotationName: "3", + "annotation1": "value", + "annotation2": "value", + utils.OperatorVersionAnnotationName: versions.Version, })) }) @@ -387,8 +392,9 @@ var _ = Describe("PVC reconciliation", func() { utils.ClusterInstanceRoleLabelName: "primary", })) Expect(patchedPvc.Annotations).To(Equal(map[string]string{ - utils.ClusterSerialAnnotationName: "1", - utils.PVCStatusAnnotationName: "ready", + utils.ClusterSerialAnnotationName: "1", + utils.PVCStatusAnnotationName: "ready", + utils.OperatorVersionAnnotationName: versions.Version, })) patchedPvc2 := fetchPVC(cl, pvc2) @@ -399,8 +405,9 @@ var _ = Describe("PVC reconciliation", func() { utils.ClusterInstanceRoleLabelName: "replica", })) Expect(patchedPvc2.Annotations).To(Equal(map[string]string{ - utils.ClusterSerialAnnotationName: "2", - utils.PVCStatusAnnotationName: "ready", + utils.OperatorVersionAnnotationName: versions.Version, + utils.ClusterSerialAnnotationName: "2", + utils.PVCStatusAnnotationName: "ready", })) patchedPvc3Wal := fetchPVC(cl, pvc3Wal) @@ -411,8 +418,9 @@ var _ = Describe("PVC reconciliation", func() { utils.ClusterInstanceRoleLabelName: "replica", })) Expect(patchedPvc3Wal.Annotations).To(Equal(map[string]string{ - utils.ClusterSerialAnnotationName: "3", - utils.PVCStatusAnnotationName: "ready", + utils.OperatorVersionAnnotationName: versions.Version, + utils.ClusterSerialAnnotationName: "3", + utils.PVCStatusAnnotationName: "ready", })) patchedPvc3Data := fetchPVC(cl, pvc3Data) @@ -423,8 +431,9 @@ var _ = Describe("PVC reconciliation", func() { utils.ClusterInstanceRoleLabelName: "replica", })) Expect(patchedPvc3Data.Annotations).To(Equal(map[string]string{ - utils.ClusterSerialAnnotationName: "3", - utils.PVCStatusAnnotationName: "ready", + utils.OperatorVersionAnnotationName: versions.Version, + utils.ClusterSerialAnnotationName: "3", + utils.PVCStatusAnnotationName: "ready", })) 
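// Editor's note: every expectation above gains utils.OperatorVersionAnnotationName,
// meaning PVC metadata reconciliation now stamps the reconciling operator's
// version on each managed PVC, matching the utils.SetOperatorVersion call made
// for the major-upgrade Job earlier in this patch. A sketch of what such a
// helper plausibly does (an assumption, not the actual utils implementation):
//
//	func SetOperatorVersion(meta *metav1.ObjectMeta, version string) {
//		// Lazily initialize the annotation map before stamping the version.
//		if meta.Annotations == nil {
//			meta.Annotations = map[string]string{}
//		}
//		meta.Annotations[OperatorVersionAnnotationName] = version
//	}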
}) }) @@ -477,11 +486,13 @@ var _ = Describe("Reconcile PVC Quantity", func() { }) It("fail if we dont' have the proper role", func() { - err := reconcilePVCQuantity( - context.Background(), - cli, - cluster, - &pvc) + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity(context.Background(), cli, &storageConfiguration, &pvc) Expect(err).To(HaveOccurred()) }) @@ -490,10 +501,16 @@ var _ = Describe("Reconcile PVC Quantity", func() { utils.PvcRoleLabelName: string(utils.PVCRolePgData), } - err := reconcilePVCQuantity( + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc) Expect(err).To(HaveOccurred()) }) @@ -501,11 +518,17 @@ var _ = Describe("Reconcile PVC Quantity", func() { It("If we don't have the proper storage configuration it should fail", func() { cluster.Spec.StorageConfiguration = apiv1.StorageConfiguration{} + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + // If we don't have a proper storage configuration we should also fail - err := reconcilePVCQuantity( + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc) Expect(err).To(HaveOccurred()) }) @@ -516,10 +539,16 @@ var _ = Describe("Reconcile PVC Quantity", func() { } cluster.Spec.StorageConfiguration.Size = "1Gi" - err := reconcilePVCQuantity( + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc) Expect(err).ToNot(HaveOccurred()) }) @@ -535,10 +564,16 @@ var _ = Describe("Reconcile PVC Quantity", func() { }, } - err := reconcilePVCQuantity( + pvcRole, err := GetExpectedObjectCalculator(pvc2.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc2) Expect(err).ToNot(HaveOccurred()) }) @@ -554,11 +589,101 @@ var _ = Describe("Reconcile PVC Quantity", func() { }, } - err := reconcilePVCQuantity( + pvcRole, err := GetExpectedObjectCalculator(pvc2.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc2) Expect(err).ToNot(HaveOccurred()) }) }) + +var _ = Describe("Reconcile Volume Attribute Class", func() { + var ( + clusterName = "cluster-volume-attr" + cluster *apiv1.Cluster + pvc corev1.PersistentVolumeClaim + cli client.Client + ctx context.Context + ) + + BeforeEach(func() { + ctx = context.Background() + cluster = &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + }, + } + pvc = makePVC(clusterName, "1", "1", 
NewPgDataCalculator(), false) + cli = fake.NewClientBuilder(). + WithScheme(scheme.BuildWithAllKnownScheme()). + WithObjects(cluster, &pvc). + Build() + }) + + It("does nothing if PersistentVolumeClaimTemplate is nil", func() { + storage := &apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: nil, + } + + err := reconcileVolumeAttributeClass(ctx, cli, storage, &pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.VolumeAttributesClassName).To(BeNil()) + }) + + It("does nothing if VolumeAttributesClassName is already the expected value", func() { + className := "fast-class" + pvc.Spec.VolumeAttributesClassName = &className + + storage := &apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + VolumeAttributesClassName: &className, + }, + } + + err := reconcileVolumeAttributeClass(ctx, cli, storage, &pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(*pvc.Spec.VolumeAttributesClassName).To(Equal(className)) + }) + + It("updates VolumeAttributesClassName when it differs from the expected value", func() { + currentClassName := "slow-class" + expectedClassName := "fast-class" + pvc.Spec.VolumeAttributesClassName = ¤tClassName + + storage := &apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + VolumeAttributesClassName: &expectedClassName, + }, + } + + err := reconcileVolumeAttributeClass(ctx, cli, storage, &pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(*pvc.Spec.VolumeAttributesClassName).To(Equal(expectedClassName)) + }) + + It("sets VolumeAttributesClassName to nil when template specifies nil", func() { + className := "existing-class" + pvc.Spec.VolumeAttributesClassName = &className + + storage := &apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + VolumeAttributesClassName: nil, + }, + } + + err := reconcileVolumeAttributeClass(ctx, cli, storage, &pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.VolumeAttributesClassName).To(BeNil()) + }) +}) diff --git a/pkg/reconciler/persistentvolumeclaim/requests.go b/pkg/reconciler/persistentvolumeclaim/requests.go deleted file mode 100644 index 71d30a1008..0000000000 --- a/pkg/reconciler/persistentvolumeclaim/requests.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package persistentvolumeclaim - -import ( - "context" - - "github.com/cloudnative-pg/machinery/pkg/log" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// reconcileResourceRequests align the resource requests -func reconcileResourceRequests( - ctx context.Context, - c client.Client, - cluster *apiv1.Cluster, - pvcs []corev1.PersistentVolumeClaim, -) error { - if !cluster.ShouldResizeInUseVolumes() { - return nil - } - - for idx := range pvcs { - if err := reconcilePVCQuantity(ctx, c, cluster, &pvcs[idx]); err != nil { - return err - } - } - - return nil -} - -func reconcilePVCQuantity( - ctx context.Context, - c client.Client, - cluster *apiv1.Cluster, - pvc *corev1.PersistentVolumeClaim, -) error { - contextLogger := log.FromContext(ctx) - pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) - if err != nil { - contextLogger.Error(err, - "encountered an error while trying to get pvc role from label", - "role", pvc.Labels[utils.PvcRoleLabelName], - ) - return err - } - - storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) - if err != nil { - contextLogger.Error(err, - "encountered an error while trying to obtain the storage configuration", - "role", pvc.Labels[utils.PvcRoleLabelName], - "pvcName", pvc.Name, - ) - return err - } - - parsedSize := storageConfiguration.GetSizeOrNil() - if parsedSize == nil { - return ErrorInvalidSize - } - currentSize := pvc.Spec.Resources.Requests["storage"] - - switch currentSize.AsDec().Cmp(parsedSize.AsDec()) { - case 0: - return nil - case 1: - contextLogger.Warning("cannot decrease storage requirement", - "from", currentSize, "to", parsedSize, - "pvcName", pvc.Name) - return nil - } - - oldPVC := pvc.DeepCopy() - // right now we reconcile the metadata in a different set of functions, so it's not needed to do it here - pvc = resources.NewPersistentVolumeClaimBuilderFromPVC(pvc). - WithRequests(corev1.ResourceList{"storage": *parsedSize}). - Build() - - if err := c.Patch(ctx, pvc, client.MergeFrom(oldPVC)); err != nil { - contextLogger.Error(err, "error while changing PVC storage requirement", - "pvcName", pvc.Name, - "pvc", pvc, - "requests", pvc.Spec.Resources.Requests, - "oldRequests", oldPVC.Spec.Resources.Requests) - return err - } - - return nil -} diff --git a/pkg/reconciler/persistentvolumeclaim/resources.go b/pkg/reconciler/persistentvolumeclaim/resources.go index 6377ca4f85..1ca30256fc 100644 --- a/pkg/reconciler/persistentvolumeclaim/resources.go +++ b/pkg/reconciler/persistentvolumeclaim/resources.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/resources_test.go b/pkg/reconciler/persistentvolumeclaim/resources_test.go index bbccc10553..e72894f4b2 100644 --- a/pkg/reconciler/persistentvolumeclaim/resources_test.go +++ b/pkg/reconciler/persistentvolumeclaim/resources_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,13 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim import ( - "context" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,7 +32,7 @@ import ( ) var _ = Describe("PVC detection", func() { - It("will list PVCs with Jobs or Pods or which are Ready", func() { + It("will list PVCs with Jobs or Pods or which are Ready", func(ctx SpecContext) { clusterName := "myCluster" makeClusterPVC := func(serial string, isResizing bool) corev1.PersistentVolumeClaim { return makePVC(clusterName, serial, serial, NewPgDataCalculator(), isResizing) @@ -48,7 +49,7 @@ var _ = Describe("PVC detection", func() { }, } EnrichStatus( - context.TODO(), + ctx, cluster, []corev1.Pod{ makePod(clusterName, "1", specs.ClusterRoleLabelPrimary), diff --git a/pkg/reconciler/persistentvolumeclaim/status.go b/pkg/reconciler/persistentvolumeclaim/status.go index aa51e4c317..1cd8688a30 100644 --- a/pkg/reconciler/persistentvolumeclaim/status.go +++ b/pkg/reconciler/persistentvolumeclaim/status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim @@ -107,7 +110,7 @@ func EnrichStatus( } // There's no point in reattaching ignored PVCs - if pvc.ObjectMeta.DeletionTimestamp != nil { + if pvc.DeletionTimestamp != nil { continue } @@ -139,7 +142,7 @@ func EnrichStatus( filteredPods := utils.FilterActivePods(runningInstances) cluster.Status.ReadyInstances = utils.CountReadyPods(filteredPods) - cluster.Status.InstancesStatus = utils.ListStatusPods(runningInstances) + cluster.Status.InstancesStatus = apiv1.ListStatusPods(runningInstances) cluster.Status.PVCCount = int32(len(managedPVCs)) //nolint:gosec cluster.Status.InitializingPVC = result.getSorted(initializing) @@ -159,7 +162,7 @@ func classifyPVC( instanceName string, ) status { // PVC to ignore - if pvc.ObjectMeta.DeletionTimestamp != nil || hasUnknownStatus(ctx, pvc) { + if pvc.DeletionTimestamp != nil || hasUnknownStatus(ctx, pvc) { return ignored } diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource.go b/pkg/reconciler/persistentvolumeclaim/storagesource.go index f1e625d36b..d3706f0aca 100644 --- a/pkg/reconciler/persistentvolumeclaim/storagesource.go +++ b/pkg/reconciler/persistentvolumeclaim/storagesource.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim @@ -82,14 +85,17 @@ func GetCandidateStorageSourceForReplica( // the cluster itself. Other backups are fine because the required // WALs have been archived in the cluster object store. - // Unless WAL archiving is active, we can't recover a replica from a backup - if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { + // Unless WAL archiving is active (via BarmanObjectStore or a WAL-archiver plugin), + // we can't recover a replica from a backup + walArchivingActive := (cluster.Spec.Backup != nil && cluster.Spec.Backup.BarmanObjectStore != nil) || + cluster.GetEnabledWALArchivePluginName() != "" + if !walArchivingActive { return nil } if result := getCandidateSourceFromBackupList( ctx, - cluster.ObjectMeta.CreationTimestamp, + cluster.CreationTimestamp, backupList, ); result != nil { return result @@ -126,7 +132,7 @@ func getCandidateSourceFromBackupList( continue } - if backup.ObjectMeta.CreationTimestamp.Before(&clusterCreationTime) { + if backup.CreationTimestamp.Before(&clusterCreationTime) { contextLogger.Info( "skipping backup as a potential recovery storage source candidate " + "because if was created before the Cluster object") diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource_test.go b/pkg/reconciler/persistentvolumeclaim/storagesource_test.go index 216d3c5e5d..5044a0c955 100644 --- a/pkg/reconciler/persistentvolumeclaim/storagesource_test.go +++ b/pkg/reconciler/persistentvolumeclaim/storagesource_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim @@ -93,6 +96,20 @@ var _ = Describe("Storage source", func() { }, } + clusterWithPluginOnly := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{}, + WalStorage: &apiv1.StorageConfiguration{}, + Backup: nil, + Plugins: []apiv1.PluginConfiguration{ + { + Name: "test-wal-archiver", + IsWALArchiver: ptr.To(true), + }, + }, + }, + } + backupList := apiv1.BackupList{ Items: []apiv1.Backup{ { @@ -190,6 +207,17 @@ var _ = Describe("Storage source", func() { Expect(source).ToNot(BeNil()) Expect(source.Name).To(Equal("completed-backup")) }) + + It("should return the backup as storage source when WAL archiving is via plugin only", func(ctx context.Context) { + source, err := NewPgDataCalculator().GetSource(GetCandidateStorageSourceForReplica( + ctx, + clusterWithPluginOnly, + backupList, + )) + Expect(err).ToNot(HaveOccurred()) + Expect(source).ToNot(BeNil()) + Expect(source.Name).To(Equal("completed-backup")) + }) }) When("there's no WAL archiving", func() { diff --git a/pkg/reconciler/persistentvolumeclaim/suite_test.go b/pkg/reconciler/persistentvolumeclaim/suite_test.go index 7af04511f7..b47104600a 100644 --- a/pkg/reconciler/persistentvolumeclaim/suite_test.go +++ b/pkg/reconciler/persistentvolumeclaim/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/validation.go b/pkg/reconciler/persistentvolumeclaim/validation.go index ac12af1f5f..7c7a1e9460 100644 --- a/pkg/reconciler/persistentvolumeclaim/validation.go +++ b/pkg/reconciler/persistentvolumeclaim/validation.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/validation_test.go b/pkg/reconciler/persistentvolumeclaim/validation_test.go index e85ab56655..6b157dd4c8 100644 --- a/pkg/reconciler/persistentvolumeclaim/validation_test.go +++ b/pkg/reconciler/persistentvolumeclaim/validation_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/replicaclusterswitch/conditions.go b/pkg/reconciler/replicaclusterswitch/conditions.go index d2ef885c82..73d89850db 100644 --- a/pkg/reconciler/replicaclusterswitch/conditions.go +++ b/pkg/reconciler/replicaclusterswitch/conditions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replicaclusterswitch diff --git a/pkg/reconciler/replicaclusterswitch/doc.go b/pkg/reconciler/replicaclusterswitch/doc.go index dc4e51bb24..7ad8598b56 100644 --- a/pkg/reconciler/replicaclusterswitch/doc.go +++ b/pkg/reconciler/replicaclusterswitch/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package replicaclusterswitch contains the logic needed to turn on the replica cluster feature on an diff --git a/pkg/reconciler/replicaclusterswitch/reconciler.go b/pkg/reconciler/replicaclusterswitch/reconciler.go index afb00515fb..07a0f0ee7d 100644 --- a/pkg/reconciler/replicaclusterswitch/reconciler.go +++ b/pkg/reconciler/replicaclusterswitch/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replicaclusterswitch @@ -29,8 +32,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -39,7 +43,7 @@ func Reconcile( ctx context.Context, cli client.Client, cluster *apiv1.Cluster, - instanceClient instance.Client, + instanceClient remote.InstanceClient, instances postgres.PostgresqlStatusList, ) (*ctrl.Result, error) { if !cluster.IsReplica() { @@ -89,28 +93,33 @@ func startTransition(ctx context.Context, cli client.Client, cluster *apiv1.Clus return nil, fmt.Errorf("while fencing primary cluster to demote it: %w", err) } - origCluster := cluster.DeepCopy() - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: conditionDesignatedPrimaryTransition, - Status: metav1.ConditionFalse, - Reason: "ReplicaClusterAfterCreation", - Message: "Enabled external cluster after a node was generated", - }) - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: conditionFence, - Status: metav1.ConditionTrue, - Reason: "ReplicaClusterAfterCreation", - Message: "Enabled external cluster after a node was generated", - }) - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: ConditionReplicaClusterSwitch, - Status: metav1.ConditionFalse, - Reason: "ReplicaEnabledSetTrue", - Message: "Starting the Replica cluster transition", - }) - - cluster.Status.SwitchReplicaClusterStatus.InProgress = true - if err := cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)); err != nil { + if err := status.PatchWithOptimisticLock( + ctx, + cli, + cluster, + func(cluster *apiv1.Cluster) { + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: conditionDesignatedPrimaryTransition, + Status: metav1.ConditionFalse, + Reason: "ReplicaClusterAfterCreation", + Message: "Enabled external cluster after a node was generated", + }) + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: conditionFence, + Status: metav1.ConditionTrue, + Reason: "ReplicaClusterAfterCreation", + Message: "Enabled external cluster after a node was generated", + }) + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: ConditionReplicaClusterSwitch, + Status: metav1.ConditionFalse, + Reason: "ReplicaEnabledSetTrue", + Message: "Starting the Replica cluster transition", + }) + + cluster.Status.SwitchReplicaClusterStatus.InProgress = true + }, + ); err != nil { return nil, err } @@ -132,25 +141,30 @@ func cleanupTransitionMetadata(ctx context.Context, cli client.Client, cluster * return err } } - origCluster := cluster.DeepCopy() - meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionDesignatedPrimaryTransition) - meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionFence) - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: ConditionReplicaClusterSwitch, - Status: metav1.ConditionTrue, - Reason: "ReplicaEnabledSetTrue", - Message: "Completed the Replica cluster transition", - }) - 
cluster.Status.SwitchReplicaClusterStatus.InProgress = false - - return cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)) + + return status.PatchWithOptimisticLock( + ctx, + cli, + cluster, + func(cluster *apiv1.Cluster) { + meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionDesignatedPrimaryTransition) + meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionFence) + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: ConditionReplicaClusterSwitch, + Status: metav1.ConditionTrue, + Reason: "ReplicaEnabledSetTrue", + Message: "Completed the Replica cluster transition", + }) + cluster.Status.SwitchReplicaClusterStatus.InProgress = false + }, + ) } func reconcileDemotionToken( ctx context.Context, cli client.Client, cluster *apiv1.Cluster, - instanceClient instance.Client, + instanceClient remote.InstanceClient, instances postgres.PostgresqlStatusList, ) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx).WithName("replica_cluster") diff --git a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go index 0af100575c..bfa948aff8 100644 --- a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go +++ b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package replicaclusterswitch @@ -23,8 +26,8 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -38,7 +41,7 @@ var errPostgresNotShutDown = fmt.Errorf("expected postmaster to be shut down") func generateDemotionToken( ctx context.Context, cluster *apiv1.Cluster, - instanceClient instance.Client, + instanceClient remote.InstanceClient, instancesStatus postgres.PostgresqlStatusList, ) (string, error) { contextLogger := log.FromContext(ctx).WithName("shutdown_checkpoint") @@ -67,7 +70,7 @@ func generateDemotionToken( return "", fmt.Errorf("could not get pg_controldata from Pod %s: %w", primaryInstance.Pod.Name, err) } parsed := utils.ParsePgControldataOutput(rawPgControlData) - pgDataState := parsed[utils.PgControlDataDatabaseClusterStateKey] + pgDataState := parsed.GetDatabaseClusterState() if !utils.PgDataState(pgDataState).IsShutdown(ctx) { // PostgreSQL is still not shut down, waiting @@ -75,7 +78,7 @@ func generateDemotionToken( return "", errPostgresNotShutDown } - token, err := utils.CreatePromotionToken(parsed) + token, err := parsed.CreatePromotionToken() if err != nil { return "", err } @@ -89,9 +92,9 @@ func generateDemotionToken( return "", fmt.Errorf("could not archive shutdown checkpoint wal file: %w", err) } - if parsed[utils.PgControlDataKeyREDOWALFile] != partialArchiveWALName { + if parsed.GetREDOWALFile() != partialArchiveWALName { return "", fmt.Errorf("unexpected shutdown checkpoint wal file archived, expected: %s, got: %s", - parsed[utils.PgControlDataKeyREDOWALFile], + parsed.GetREDOWALFile(), partialArchiveWALName, ) } diff --git a/pkg/resources/doc.go b/pkg/resources/doc.go index 6501187d05..1936616039 100644 --- a/pkg/resources/doc.go +++ b/pkg/resources/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package resources contains a set of Kubernetes generic utilities diff --git a/pkg/resources/labels_annotations.go b/pkg/resources/labels_annotations.go index 9b2049df59..7bd17656e6 100644 --- a/pkg/resources/labels_annotations.go +++ b/pkg/resources/labels_annotations.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package resources diff --git a/pkg/resources/metadatabuilder.go b/pkg/resources/metadatabuilder.go index ed2e1902d9..300f4f9ee3 100644 --- a/pkg/resources/metadatabuilder.go +++ b/pkg/resources/metadatabuilder.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package resources diff --git a/pkg/resources/persistentvolumeclaim.go b/pkg/resources/persistentvolumeclaim.go index fd1c2c6168..4988222585 100644 --- a/pkg/resources/persistentvolumeclaim.go +++ b/pkg/resources/persistentvolumeclaim.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package resources diff --git a/pkg/resources/retry.go b/pkg/resources/retry.go index 4a0d13aa1b..0c38bbb234 100644 --- a/pkg/resources/retry.go +++ b/pkg/resources/retry.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package resources diff --git a/pkg/resources/retry_test.go b/pkg/resources/retry_test.go index 04ae25cb90..7503998c60 100644 --- a/pkg/resources/retry_test.go +++ b/pkg/resources/retry_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,13 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package resources import ( - "context" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,11 +42,9 @@ var _ = Describe("RetryWithRefreshedResource", func() { var ( fakeClient client.Client testResource *appsv1.Deployment - ctx context.Context ) BeforeEach(func() { - ctx = context.TODO() fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build() testResource = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, @@ -70,7 +69,7 @@ var _ = Describe("RetryWithRefreshedResource", func() { }) Context("when client.Get succeeds", func() { - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { // Set up the fake client to return the resource without error Expect(fakeClient.Create(ctx, testResource)).To(Succeed()) @@ -80,7 +79,7 @@ var _ = Describe("RetryWithRefreshedResource", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should invoke the callback without error and update the resource", func() { + It("should invoke the callback without error and update the resource", func(ctx SpecContext) { // ensure that the local deployment contains the old value Expect(*testResource.Spec.Replicas).To(Equal(int32(1))) diff --git a/pkg/resources/status/backup.go b/pkg/resources/status/backup.go new file mode 100644 index 0000000000..01f594b533 --- /dev/null +++ b/pkg/resources/status/backup.go @@ -0,0 +1,134 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "context" + "fmt" + "time" + + "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// BackupTransaction is a function that modifies a Backup object. +type BackupTransaction func(*apiv1.Backup) + +type flagBackupErrors struct { + clusterStatusErr error + backupErr error + clusterConditionErr error +} + +func (f flagBackupErrors) Error() string { + var message string + if f.clusterStatusErr != nil { + message += fmt.Sprintf("error patching cluster status: %v; ", f.clusterStatusErr) + } + if f.backupErr != nil { + message += fmt.Sprintf("error patching backup status: %v; ", f.backupErr) + } + if f.clusterConditionErr != nil { + message += fmt.Sprintf("error patching cluster conditions: %v; ", f.clusterConditionErr) + } + + return message +} + +// toError returns the errors encountered or nil +func (f flagBackupErrors) toError() error { + if f.clusterStatusErr != nil || f.backupErr != nil || f.clusterConditionErr != nil { + return f + } + return nil +} + +// FlagBackupAsFailed updates the status of a Backup object to indicate that it has failed. 
+func FlagBackupAsFailed( + ctx context.Context, + cli client.Client, + backup *apiv1.Backup, + cluster *apiv1.Cluster, + err error, + transactions ...BackupTransaction, +) error { + contextLogger := log.FromContext(ctx) + + var flagErr flagBackupErrors + + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var livingBackup apiv1.Backup + if err := cli.Get(ctx, client.ObjectKeyFromObject(backup), &livingBackup); err != nil { + contextLogger.Error(err, "failed to get backup") + return err + } + origBackup := livingBackup.DeepCopy() + livingBackup.Status.SetAsFailed(err) + livingBackup.Status.Method = livingBackup.Spec.Method + for _, transaction := range transactions { + transaction(&livingBackup) + } + + err := cli.Status().Patch(ctx, &livingBackup, client.MergeFrom(origBackup)) + if err != nil { + contextLogger.Error(err, "while patching backup status") + return err + } + // we mutate the original object + backup.Status = livingBackup.Status + + return nil + }); err != nil { + contextLogger.Error(err, "while flagging backup as failed") + flagErr.backupErr = err + } + + if cluster == nil { + return flagErr.toError() + } + + if err := PatchWithOptimisticLock( + ctx, + cli, + cluster, + func(cluster *apiv1.Cluster) { + cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) //nolint:staticcheck + }, + ); err != nil { + contextLogger.Error(err, "while patching cluster status with last failed backup") + flagErr.clusterStatusErr = err + } + + if err := PatchConditionsWithOptimisticLock( + ctx, + cli, + cluster, + apiv1.BuildClusterBackupFailedCondition(err), + ); err != nil { + contextLogger.Error(err, "while patching backup condition in the cluster status (backup failed)") + flagErr.clusterConditionErr = err + } + + return flagErr.toError() +} diff --git a/pkg/resources/status/backup_test.go b/pkg/resources/status/backup_test.go new file mode 100644 index 0000000000..3a4172c795 --- /dev/null +++ b/pkg/resources/status/backup_test.go @@ -0,0 +1,83 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("FlagBackupAsFailed", func() { + scheme := schemeBuilder.BuildWithAllKnownScheme() + k8sClient := fake.NewClientBuilder().WithScheme(scheme). + WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Backup{}). 
+ Build() + + It("flags the backup as failed and updates the cluster status", func(ctx SpecContext) { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + } + + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name, + Namespace: cluster.Namespace, + }, + Spec: apiv1.BackupSpec{ + Cluster: apiv1.LocalObjectReference{ + Name: cluster.Name, + }, + }, + Status: apiv1.BackupStatus{ + Phase: apiv1.BackupPhaseRunning, + }, + } + Expect(k8sClient.Create(ctx, cluster)).To(Succeed()) + Expect(k8sClient.Create(ctx, backup)).To(Succeed()) + + err := FlagBackupAsFailed(ctx, k8sClient, backup, cluster, errors.New("my sample error")) + Expect(err).NotTo(HaveOccurred()) + + // Backup status assertions + Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseFailed)) + Expect(backup.Status.Error).To(BeEquivalentTo("my sample error")) + + // Cluster status assertions + Expect(cluster.Status.LastFailedBackup).ToNot(BeEmpty()) //nolint:staticcheck + for _, condition := range cluster.Status.Conditions { + if condition.Type == string(apiv1.ConditionBackup) { + Expect(condition.Status).To(BeEquivalentTo(metav1.ConditionFalse)) + Expect(condition.Reason).To(BeEquivalentTo(string(apiv1.ConditionReasonLastBackupFailed))) + Expect(condition.Message).To(BeEquivalentTo("my sample error")) + } + } + }) +}) diff --git a/pkg/resources/status/conditions.go b/pkg/resources/status/conditions.go new file mode 100644 index 0000000000..00c87d5e9a --- /dev/null +++ b/pkg/resources/status/conditions.go @@ -0,0 +1,84 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// PatchConditionsWithOptimisticLock will update a particular condition in cluster status. +// This function may update the conditions in the passed cluster +// with the latest ones that were found from the API server. +// This function is needed because Kubernetes still doesn't support strategic merge +// for CRDs (see https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/). 
+func PatchConditionsWithOptimisticLock( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + conditions ...metav1.Condition, +) error { + if cluster == nil || len(conditions) == 0 { + return nil + } + + applyConditions := func(cluster *apiv1.Cluster) bool { + changed := false + for _, c := range conditions { + changed = changed || meta.SetStatusCondition(&cluster.Status.Conditions, c) + } + return changed + } + + var currentCluster apiv1.Cluster + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), &currentCluster); err != nil { + return err + } + + updatedCluster := currentCluster.DeepCopy() + if changed := applyConditions(updatedCluster); !changed { + return nil + } + + if err := c.Status().Patch( + ctx, + updatedCluster, + client.MergeFromWithOptions(&currentCluster, client.MergeFromWithOptimisticLock{}), + ); err != nil { + return err + } + + cluster.Status.Conditions = updatedCluster.Status.Conditions + + return nil + }); err != nil { + return fmt.Errorf("while patching conditions: %w", err) + } + + return nil +} diff --git a/pkg/resources/status/doc.go b/pkg/resources/status/doc.go index d998bfbcbb..b830750400 100644 --- a/pkg/resources/status/doc.go +++ b/pkg/resources/status/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package status contains all the function needed to interact properly with the resources status diff --git a/pkg/resources/status/patch.go b/pkg/resources/status/patch.go new file mode 100644 index 0000000000..5bc7e59483 --- /dev/null +++ b/pkg/resources/status/patch.go @@ -0,0 +1,90 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/log" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// PatchWithOptimisticLock updates the status of the cluster using the passed +// transaction functions (in the given order). 
+// Important: after successfully updating the status, this +// function refreshes it into the passed cluster +func PatchWithOptimisticLock( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + txs ...Transaction, +) error { + if cluster == nil { + return nil + } + + contextLogger := log.FromContext(ctx) + + origCluster := cluster.DeepCopy() + + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var currentCluster apiv1.Cluster + if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), &currentCluster); err != nil { + return err + } + + updatedCluster := currentCluster.DeepCopy() + for _, tx := range txs { + tx(updatedCluster) + } + + if equality.Semantic.DeepEqual(currentCluster.Status, updatedCluster.Status) { + return nil + } + + if err := c.Status().Patch( + ctx, + updatedCluster, + client.MergeFromWithOptions(&currentCluster, client.MergeFromWithOptimisticLock{}), + ); err != nil { + return err + } + + cluster.Status = updatedCluster.Status + + return nil + }); err != nil { + return fmt.Errorf("while patching status: %w", err) + } + + if cluster.Status.Phase != apiv1.PhaseHealthy && origCluster.Status.Phase == apiv1.PhaseHealthy { + contextLogger.Info("Cluster has become unhealthy") + } + if cluster.Status.Phase == apiv1.PhaseHealthy && origCluster.Status.Phase != apiv1.PhaseHealthy { + contextLogger.Info("Cluster has become healthy") + } + + return nil +} diff --git a/pkg/resources/status/phase.go b/pkg/resources/status/phase.go deleted file mode 100644 index 684eefd26c..0000000000 --- a/pkg/resources/status/phase.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
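For context, a caller-side sketch of PatchWithOptimisticLock above. Because retry.RetryOnConflict re-runs the closure against a freshly fetched cluster, every Transaction can execute more than once and should be idempotent; the inline transaction here is illustrative only:

    err := status.PatchWithOptimisticLock(ctx, cli, cluster,
        func(c *apiv1.Cluster) {
            // Applied to a fresh copy of the cluster on every retry attempt.
            c.Status.Phase = apiv1.PhaseHealthy
        },
    )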
-*/ - -package status - -import ( - "context" - "reflect" - - "github.com/cloudnative-pg/machinery/pkg/log" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// RegisterPhase update phase in the status cluster with the -// proper reason -func RegisterPhase( - ctx context.Context, - cli client.Client, - cluster *apiv1.Cluster, - phase string, - reason string, -) error { - existingCluster := cluster.DeepCopy() - return RegisterPhaseWithOrigCluster(ctx, cli, cluster, existingCluster, phase, reason) -} - -// RegisterPhaseWithOrigCluster update phase in the status cluster with the -// proper reason, it also receives an origCluster to preserve other modifications done to the status -func RegisterPhaseWithOrigCluster( - ctx context.Context, - cli client.Client, - modifiedCluster *apiv1.Cluster, - origCluster *apiv1.Cluster, - phase string, - reason string, -) error { - contextLogger := log.FromContext(ctx) - - // we ensure that the modifiedCluster conditions aren't nil before operating - if modifiedCluster.Status.Conditions == nil { - modifiedCluster.Status.Conditions = []metav1.Condition{} - } - - modifiedCluster.Status.Phase = phase - modifiedCluster.Status.PhaseReason = reason - - condition := metav1.Condition{ - Type: string(apiv1.ConditionClusterReady), - Status: metav1.ConditionFalse, - Reason: string(apiv1.ClusterIsNotReady), - Message: "Cluster Is Not Ready", - } - - if modifiedCluster.Status.Phase == apiv1.PhaseHealthy { - condition = metav1.Condition{ - Type: string(apiv1.ConditionClusterReady), - Status: metav1.ConditionTrue, - Reason: string(apiv1.ClusterReady), - Message: "Cluster is Ready", - } - } - - meta.SetStatusCondition(&modifiedCluster.Status.Conditions, condition) - - if !reflect.DeepEqual(origCluster, modifiedCluster) { - modifiedPhase := modifiedCluster.Status.Phase - origPhase := origCluster.Status.Phase - - if modifiedPhase != apiv1.PhaseHealthy && origPhase == apiv1.PhaseHealthy { - contextLogger.Info("Cluster is not healthy") - } - if modifiedPhase == apiv1.PhaseHealthy && origPhase != apiv1.PhaseHealthy { - contextLogger.Info("Cluster is healthy") - } - if err := cli.Status().Patch(ctx, modifiedCluster, client.MergeFrom(origCluster)); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/resources/status/suite_test.go b/pkg/resources/status/suite_test.go new file mode 100644 index 0000000000..7ef3812e04 --- /dev/null +++ b/pkg/resources/status/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestConfiguration(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Internal Configuration Test Suite") +} diff --git a/pkg/resources/status/transactions.go b/pkg/resources/status/transactions.go new file mode 100644 index 0000000000..d758ee3477 --- /dev/null +++ b/pkg/resources/status/transactions.go @@ -0,0 +1,78 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// Transaction is a function that modifies a cluster +type Transaction func(cluster *apiv1.Cluster) + +// SetClusterReadyCondition updates the cluster's readiness condition +// according to the cluster phase +func SetClusterReadyCondition(cluster *apiv1.Cluster) { + if cluster.Status.Conditions == nil { + cluster.Status.Conditions = []metav1.Condition{} + } + + condition := metav1.Condition{ + Type: string(apiv1.ConditionClusterReady), + Status: metav1.ConditionFalse, + Reason: string(apiv1.ClusterIsNotReady), + Message: "Cluster Is Not Ready", + } + + if cluster.Status.Phase == apiv1.PhaseHealthy { + condition = metav1.Condition{ + Type: string(apiv1.ConditionClusterReady), + Status: metav1.ConditionTrue, + Reason: string(apiv1.ClusterReady), + Message: "Cluster is Ready", + } + } + + meta.SetStatusCondition(&cluster.Status.Conditions, condition) +} + +// SetPhase is a transaction that sets the cluster phase and reason +func SetPhase(phase string, reason string) Transaction { + return func(cluster *apiv1.Cluster) { + cluster.Status.Phase = phase + cluster.Status.PhaseReason = reason + } +} + +// SetImage is a transaction that sets the cluster image +func SetImage(image string) Transaction { + return func(cluster *apiv1.Cluster) { + cluster.Status.Image = image + } +} + +// SetPGDataImageInfo is a transaction that sets the PGDataImageInfo +func SetPGDataImageInfo(imageInfo *apiv1.ImageInfo) Transaction { + return func(cluster *apiv1.Cluster) { + cluster.Status.PGDataImageInfo = imageInfo + } +} diff --git a/pkg/resources/suite_test.go b/pkg/resources/suite_test.go index 9accbbf2ed..62d320da65 100644 --- a/pkg/resources/suite_test.go +++ b/pkg/resources/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package resources diff --git a/pkg/servicespec/builder.go b/pkg/servicespec/builder.go index 8f1cbaae36..1b8dbc7038 100644 --- a/pkg/servicespec/builder.go +++ b/pkg/servicespec/builder.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package servicespec contains various utilities to deal with Service Specs diff --git a/pkg/servicespec/builder_test.go b/pkg/servicespec/builder_test.go index 11916a3955..9529666796 100644 --- a/pkg/servicespec/builder_test.go +++ b/pkg/servicespec/builder_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package servicespec diff --git a/pkg/servicespec/suite_test.go b/pkg/servicespec/suite_test.go index 2c2460ac8d..98796d834c 100644 --- a/pkg/servicespec/suite_test.go +++ b/pkg/servicespec/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package servicespec diff --git a/pkg/specs/containers.go b/pkg/specs/containers.go index 88a56af2d7..27ebfe3f65 100644 --- a/pkg/specs/containers.go +++ b/pkg/specs/containers.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -38,7 +41,7 @@ func createBootstrapContainer(cluster apiv1.Cluster) corev1.Container { "bootstrap", "/controller/manager", }, - VolumeMounts: createPostgresVolumeMounts(cluster), + VolumeMounts: CreatePostgresVolumeMounts(cluster), Resources: cluster.Spec.Resources, SecurityContext: CreateContainerSecurityContext(cluster.GetSeccompProfile()), } diff --git a/pkg/specs/containers_test.go b/pkg/specs/containers_test.go index e8573c9fd3..3839a090bc 100644 --- a/pkg/specs/containers_test.go +++ b/pkg/specs/containers_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs diff --git a/pkg/specs/jobs.go b/pkg/specs/jobs.go index 3ccdd8c0d0..12ae77f200 100644 --- a/pkg/specs/jobs.go +++ b/pkg/specs/jobs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -86,7 +89,7 @@ func CreatePrimaryJobViaInitdb(cluster apiv1.Cluster, nodeSerial int) *batchv1.J initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) 
if cluster.Spec.Bootstrap.InitDB.Import != nil { - return createPrimaryJob(cluster, nodeSerial, jobRoleImport, initCommand) + return CreatePrimaryJob(cluster, nodeSerial, jobRoleImport, initCommand) } if cluster.ShouldInitDBRunPostInitApplicationSQLRefs() { @@ -104,7 +107,7 @@ func CreatePrimaryJobViaInitdb(cluster apiv1.Cluster, nodeSerial int) *batchv1.J "--post-init-sql-refs-folder", postInitSQLRefsFolder.toString()) } - return createPrimaryJob(cluster, nodeSerial, jobRoleInitDB, initCommand) + return CreatePrimaryJob(cluster, nodeSerial, jobRoleInitDB, initCommand) } func buildInitDBFlags(cluster apiv1.Cluster) (initCommand []string) { @@ -142,6 +145,21 @@ func buildInitDBFlags(cluster apiv1.Cluster) (initCommand []string) { if localeCType := config.LocaleCType; localeCType != "" { options = append(options, fmt.Sprintf("--lc-ctype=%s", localeCType)) } + if locale := config.Locale; locale != "" { + options = append(options, fmt.Sprintf("--locale=%s", locale)) + } + if localeProvider := config.LocaleProvider; localeProvider != "" { + options = append(options, fmt.Sprintf("--locale-provider=%s", localeProvider)) + } + if icuLocale := config.IcuLocale; icuLocale != "" { + options = append(options, fmt.Sprintf("--icu-locale=%s", icuLocale)) + } + if icuRules := config.IcuRules; icuRules != "" { + options = append(options, fmt.Sprintf("--icu-rules=%s", icuRules)) + } + if builtinLocale := config.BuiltinLocale; builtinLocale != "" { + options = append(options, fmt.Sprintf("--builtin-locale=%s", builtinLocale)) + } if walSegmentSize := config.WalSegmentSize; walSegmentSize != 0 && utils.IsPowerOfTwo(walSegmentSize) { options = append(options, fmt.Sprintf("--wal-segsize=%v", walSegmentSize)) } @@ -178,7 +196,7 @@ func CreatePrimaryJobViaRestoreSnapshot( initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) - job := createPrimaryJob(cluster, nodeSerial, jobRoleSnapshotRecovery, initCommand) + job := CreatePrimaryJob(cluster, nodeSerial, jobRoleSnapshotRecovery, initCommand) addBarmanEndpointCAToJobFromCluster(cluster, backup, job) @@ -195,7 +213,7 @@ func CreatePrimaryJobViaRecovery(cluster apiv1.Cluster, nodeSerial int, backup * initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) - job := createPrimaryJob(cluster, nodeSerial, jobRoleFullRecovery, initCommand) + job := CreatePrimaryJob(cluster, nodeSerial, jobRoleFullRecovery, initCommand) addBarmanEndpointCAToJobFromCluster(cluster, backup, job) @@ -236,7 +254,7 @@ func CreatePrimaryJobViaPgBaseBackup(cluster apiv1.Cluster, nodeSerial int) *bat initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) - return createPrimaryJob(cluster, nodeSerial, jobRolePGBaseBackup, initCommand) + return CreatePrimaryJob(cluster, nodeSerial, jobRolePGBaseBackup, initCommand) } // JoinReplicaInstance create a new PostgreSQL node, copying the contents from another Pod @@ -250,7 +268,7 @@ func JoinReplicaInstance(cluster apiv1.Cluster, nodeSerial int) *batchv1.Job { initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) - return createPrimaryJob(cluster, nodeSerial, jobRoleJoin, initCommand) + return CreatePrimaryJob(cluster, nodeSerial, jobRoleJoin, initCommand) } // RestoreReplicaInstance creates a new PostgreSQL replica starting from a volume snapshot backup @@ -264,7 +282,7 @@ func RestoreReplicaInstance(cluster apiv1.Cluster, nodeSerial int) *batchv1.Job initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) 
- job := createPrimaryJob(cluster, nodeSerial, jobRoleSnapshotRecovery, initCommand) + job := CreatePrimaryJob(cluster, nodeSerial, jobRoleSnapshotRecovery, initCommand) return job } @@ -290,25 +308,14 @@ const ( jobRoleSnapshotRecovery jobRole = "snapshot-recovery" ) -var jobRoleList = []jobRole{jobRoleImport, jobRoleInitDB, jobRolePGBaseBackup, jobRoleFullRecovery, jobRoleJoin} - // getJobName returns a string indicating the job name func (role jobRole) getJobName(instanceName string) string { return fmt.Sprintf("%s-%s", instanceName, role) } -// GetPossibleJobNames get all the possible job names for a given instance -func GetPossibleJobNames(instanceName string) []string { - res := make([]string, len(jobRoleList)) - for idx, role := range jobRoleList { - res[idx] = role.getJobName(instanceName) - } - return res -} - -// createPrimaryJob create a job that executes the provided command. +// CreatePrimaryJob creates a job that executes the provided command. // The role should describe the purpose of the executed job -func createPrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initCommand []string) *batchv1.Job { +func CreatePrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initCommand []string) *batchv1.Job { instanceName := GetInstanceName(cluster.Name, nodeSerial) jobName := role.getJobName(instanceName) @@ -321,6 +328,7 @@ func createPrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initC Labels: map[string]string{ utils.InstanceNameLabelName: instanceName, utils.ClusterLabelName: cluster.Name, + utils.JobRoleLabelName: string(role), }, }, Spec: batchv1.JobSpec{ @@ -341,12 +349,12 @@ func createPrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initC Containers: []corev1.Container{ { Name: string(role), - Image: cluster.GetImageName(), + Image: cluster.Status.Image, ImagePullPolicy: cluster.Spec.ImagePullPolicy, Env: envConfig.EnvVars, EnvFrom: envConfig.EnvFrom, Command: initCommand, - VolumeMounts: createPostgresVolumeMounts(cluster), + VolumeMounts: CreatePostgresVolumeMounts(cluster), Resources: cluster.Spec.Resources, SecurityContext: CreateContainerSecurityContext(cluster.GetSeccompProfile()), }, diff --git a/pkg/specs/jobs_test.go b/pkg/specs/jobs_test.go index 5d9c7eab32..e4e7ea1daa 100644 --- a/pkg/specs/jobs_test.go +++ b/pkg/specs/jobs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,16 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
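For context, with GetPossibleJobNames removed, the hunks above attach utils.JobRoleLabelName to every bootstrap job, so callers can list an instance's jobs by label instead of enumerating the possible names. A minimal sketch, assuming a controller-runtime client `cli` (illustrative only):

    // List all bootstrap jobs of one instance via labels rather than names.
    var jobs batchv1.JobList
    err := cli.List(ctx, &jobs,
        client.InNamespace(cluster.Namespace),
        client.MatchingLabels{utils.InstanceNameLabelName: instanceName},
    )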
+ +SPDX-License-Identifier: Apache-2.0 */ package specs import ( - v1 "k8s.io/api/batch/v1" + "slices" + + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,8 +42,8 @@ var _ = Describe("Barman endpoint CA", func() { }, } - job := v1.Job{ - Spec: v1.JobSpec{ + job := batchv1.Job{ + Spec: batchv1.JobSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{{}}, @@ -80,8 +85,8 @@ var _ = Describe("Barman endpoint CA", func() { }, } - job := v1.Job{ - Spec: v1.JobSpec{ + job := batchv1.Job{ + Spec: batchv1.JobSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -118,8 +123,8 @@ var _ = Describe("Barman endpoint CA", func() { }, }} - job := v1.Job{ - Spec: v1.JobSpec{ + job := batchv1.Job{ + Spec: batchv1.JobSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -165,4 +170,29 @@ var _ = Describe("Job created via InitDB", func() { Expect(job.Spec.Template.Spec.Containers[0].Command).Should(ContainElement( postInitApplicationSQLRefsFolder.toString())) }) + + It("contains icu configuration", func() { + cluster := apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ + Encoding: "UTF-8", + LocaleProvider: "icu", + IcuLocale: "und", + IcuRules: "&A < z <<< Z", + }, + }, + }, + } + job := CreatePrimaryJobViaInitdb(cluster, 0) + + jobCommand := job.Spec.Template.Spec.Containers[0].Command + Expect(jobCommand).Should(ContainElement("--initdb-flags")) + initdbFlags := jobCommand[slices.Index(jobCommand, "--initdb-flags")+1] + Expect(initdbFlags).Should(ContainSubstring("--encoding=UTF-8")) + Expect(initdbFlags).Should(ContainSubstring("--locale-provider=icu")) + Expect(initdbFlags).Should(ContainSubstring("--icu-locale=und")) + Expect(initdbFlags).ShouldNot(ContainSubstring("--locale=")) + Expect(initdbFlags).Should(ContainSubstring("'--icu-rules=&A < z <<< Z'")) + }) }) diff --git a/pkg/specs/pg_pods.go b/pkg/specs/pg_pods.go index a2ac41572a..1ca30e55a0 100644 --- a/pkg/specs/pg_pods.go +++ b/pkg/specs/pg_pods.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs diff --git a/pkg/specs/pg_pods_test.go b/pkg/specs/pg_pods_test.go index f7a5794e1c..982be12afc 100644 --- a/pkg/specs/pg_pods_test.go +++ b/pkg/specs/pg_pods_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package specs import ( + "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -32,8 +37,12 @@ var _ = Describe("Extract the used image name", func() { Name: "clusterName", Namespace: "default", }, + Status: apiv1.ClusterStatus{ + Image: configuration.Current.PostgresImageName, + }, } - pod := PodWithExistingStorage(cluster, 1) + pod, err := NewInstance(context.TODO(), cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) It("extract the default image name", func() { Expect(GetPostgresImageName(*pod)).To(Equal(configuration.Current.PostgresImageName)) diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go index 12fe9e0931..2df9f7d71a 100644 --- a/pkg/specs/pgbouncer/deployments.go +++ b/pkg/specs/pgbouncer/deployments.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package pgbouncer contains the specification of the K8s resources @@ -19,6 +22,8 @@ limitations under the License. package pgbouncer import ( + "path" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,7 +42,7 @@ import ( const ( // DefaultPgbouncerImage is the name of the pgbouncer image used by default - DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.23.0" + DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.24.1" ) // Deployment create the deployment of pgbouncer, given @@ -89,6 +94,7 @@ func Deployment(pooler *apiv1.Pooler, cluster *apiv1.Cluster) (*appsv1.Deploymen WithInitContainerCommand(specs.BootstrapControllerContainerName, []string{"/manager", "bootstrap", "/controller/manager"}, true). + WithInitContainerResources(specs.BootstrapControllerContainerName, pooler.GetResourcesRequirements(), true). WithInitContainerSecurityContext(specs.BootstrapControllerContainerName, specs.CreateContainerSecurityContext(cluster.GetSeccompProfile()), true). @@ -108,6 +114,13 @@ func Deployment(pooler *apiv1.Pooler, cluster *apiv1.Cluster) (*appsv1.Deploymen }, true). WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "NAMESPACE", Value: pooler.Namespace}, true). WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "POOLER_NAME", Value: pooler.Name}, true). + WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGUSER", Value: "pgbouncer"}, false). + WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGDATABASE", Value: "pgbouncer"}, false). + WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGHOST", Value: "/controller/run"}, false). + WithContainerEnv("pgbouncer", corev1.EnvVar{ + Name: "PSQL_HISTORY", + Value: path.Join(postgres.TemporaryDirectory, ".psql_history"), + }, false). WithContainerSecurityContext("pgbouncer", specs.CreateContainerSecurityContext(cluster.GetSeccompProfile()), true). WithServiceAccountName(pooler.Name, true). 
WithReadinessProbe("pgbouncer", &corev1.Probe{ diff --git a/pkg/specs/pgbouncer/deployments_test.go b/pkg/specs/pgbouncer/deployments_test.go index 67ba6bc306..31af3a075e 100644 --- a/pkg/specs/pgbouncer/deployments_test.go +++ b/pkg/specs/pgbouncer/deployments_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbouncer diff --git a/pkg/specs/pgbouncer/podmonitor.go b/pkg/specs/pgbouncer/podmonitor.go index 579a8006d6..92a64c9bb6 100644 --- a/pkg/specs/pgbouncer/podmonitor.go +++ b/pkg/specs/pgbouncer/podmonitor.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbouncer @@ -51,8 +54,9 @@ func (c PoolerPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor { utils.SetAsOwnedBy(&meta, c.pooler.ObjectMeta, c.pooler.TypeMeta) + metricsPort := "metrics" endpoint := monitoringv1.PodMetricsEndpoint{ - Port: "metrics", + Port: &metricsPort, } if c.pooler.Spec.Monitoring != nil { @@ -62,7 +66,10 @@ func (c PoolerPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor { spec := monitoringv1.PodMonitorSpec{ Selector: metav1.LabelSelector{ - MatchLabels: meta.Labels, + MatchLabels: map[string]string{ + utils.PgbouncerNameLabel: c.pooler.Name, + utils.PodRoleLabelName: string(utils.PodRolePooler), + }, }, PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{endpoint}, } diff --git a/pkg/specs/pgbouncer/podmonitor_test.go b/pkg/specs/pgbouncer/podmonitor_test.go index 8643955ff5..dfceeb3a7c 100644 --- a/pkg/specs/pgbouncer/podmonitor_test.go +++ b/pkg/specs/pgbouncer/podmonitor_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package pgbouncer @@ -76,10 +79,11 @@ var _ = Describe("PoolerPodMonitorManager", func() { Expect(podMonitor.Spec.Selector.MatchLabels).To(Equal(map[string]string{ utils.PgbouncerNameLabel: pooler.Name, + utils.PodRoleLabelName: string(utils.PodRolePooler), })) Expect(podMonitor.Spec.PodMetricsEndpoints).To(HaveLen(1)) - Expect(podMonitor.Spec.PodMetricsEndpoints[0].Port).To(Equal("metrics")) + Expect(*podMonitor.Spec.PodMetricsEndpoints[0].Port).To(Equal("metrics")) }) }) diff --git a/pkg/specs/pgbouncer/rbac.go b/pkg/specs/pgbouncer/rbac.go index f134ca9149..05fb89c52f 100644 --- a/pkg/specs/pgbouncer/rbac.go +++ b/pkg/specs/pgbouncer/rbac.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbouncer diff --git a/pkg/specs/pgbouncer/rbac_test.go b/pkg/specs/pgbouncer/rbac_test.go index 0404927eb1..a4f961ed45 100644 --- a/pkg/specs/pgbouncer/rbac_test.go +++ b/pkg/specs/pgbouncer/rbac_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbouncer diff --git a/pkg/specs/pgbouncer/services.go b/pkg/specs/pgbouncer/services.go index 4b63446b04..0b8653533e 100644 --- a/pkg/specs/pgbouncer/services.go +++ b/pkg/specs/pgbouncer/services.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbouncer diff --git a/pkg/specs/pgbouncer/services_test.go b/pkg/specs/pgbouncer/services_test.go index 62d14a25c3..0163399b43 100644 --- a/pkg/specs/pgbouncer/services_test.go +++ b/pkg/specs/pgbouncer/services_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbouncer diff --git a/pkg/specs/pgbouncer/suite_test.go b/pkg/specs/pgbouncer/suite_test.go index ab693cbd2c..d8e2c982c1 100644 --- a/pkg/specs/pgbouncer/suite_test.go +++ b/pkg/specs/pgbouncer/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbouncer diff --git a/pkg/specs/poddisruptionbudget.go b/pkg/specs/poddisruptionbudget.go index cf88f80454..b44180c737 100644 --- a/pkg/specs/poddisruptionbudget.go +++ b/pkg/specs/poddisruptionbudget.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs diff --git a/pkg/specs/poddisruptionbudget_test.go b/pkg/specs/poddisruptionbudget_test.go index 115ae0825b..a58132a748 100644 --- a/pkg/specs/poddisruptionbudget_test.go +++ b/pkg/specs/poddisruptionbudget_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs diff --git a/pkg/specs/podmonitor.go b/pkg/specs/podmonitor.go index 25770e3684..dcf9ef2ee6 100644 --- a/pkg/specs/podmonitor.go +++ b/pkg/specs/podmonitor.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -24,6 +27,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // ClusterPodMonitorManager builds the PodMonitor for the cluster resource @@ -44,8 +48,9 @@ func (c ClusterPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor { } c.cluster.SetInheritedDataAndOwnership(&meta) + metricsPort := "metrics" endpoint := monitoringv1.PodMetricsEndpoint{ - Port: "metrics", + Port: &metricsPort, } if c.cluster.IsMetricsTLSEnabled() { @@ -73,7 +78,10 @@ func (c ClusterPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor { spec := monitoringv1.PodMonitorSpec{ Selector: metav1.LabelSelector{ - MatchLabels: meta.Labels, + MatchLabels: map[string]string{ + utils.ClusterLabelName: c.cluster.Name, + utils.PodRoleLabelName: string(utils.PodRoleInstance), + }, }, PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{endpoint}, } diff --git a/pkg/specs/podmonitor_test.go b/pkg/specs/podmonitor_test.go index 6c486e1808..30215e43da 100644 --- a/pkg/specs/podmonitor_test.go +++ b/pkg/specs/podmonitor_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -35,6 +38,7 @@ var _ = Describe("PodMonitor test", func() { clusterName = "test" clusterNamespace = "test-namespace" ) + metricsPort := "metrics" assertPodMonitorCorrect := func(cluster *apiv1.Cluster, expectedEndpoint monitoringv1.PodMetricsEndpoint) { getMetricRelabelings := func() []monitoringv1.RelabelConfig { @@ -61,8 +65,14 @@ var _ = Describe("PodMonitor test", func() { It("should create a valid monitoringv1.PodMonitor object", func() { mgr := NewClusterPodMonitorManager(cluster.DeepCopy()) monitor := mgr.BuildPodMonitor() - Expect(monitor.Labels[utils.ClusterLabelName]).To(Equal(cluster.Name)) - Expect(monitor.Spec.Selector.MatchLabels[utils.ClusterLabelName]).To(Equal(cluster.Name)) + Expect(monitor.Labels).To(BeEquivalentTo(map[string]string{ + utils.ClusterLabelName: cluster.Name, + })) + Expect(monitor.Spec.Selector.MatchLabels).To(BeEquivalentTo(map[string]string{ + utils.ClusterLabelName: cluster.Name, + utils.PodRoleLabelName: string(utils.PodRoleInstance), + })) + Expect(monitor.Spec.PodMetricsEndpoints).To(ContainElement(expectedEndpoint)) }) @@ -121,7 +131,7 @@ var _ = Describe("PodMonitor test", func() { }, } - expectedEndpoint := monitoringv1.PodMetricsEndpoint{Port: "metrics"} + expectedEndpoint := monitoringv1.PodMetricsEndpoint{Port: &metricsPort} assertPodMonitorCorrect(&cluster, expectedEndpoint) }) @@ -142,7 +152,7 @@ var _ = Describe("PodMonitor test", func() { } expectedEndpoint := monitoringv1.PodMetricsEndpoint{ - Port: "metrics", + Port: &metricsPort, Scheme: "https", TLSConfig: &monitoringv1.SafeTLSConfig{ CA: monitoringv1.SecretOrConfigMap{ diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index 91aa0b758f..127b6f2040 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ 
-1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package specs contains the specification of the K8s resources @@ -19,18 +22,24 @@ limitations under the License. package specs import ( + "context" "encoding/json" "fmt" "math" + "path" "reflect" "slices" "strconv" + "github.com/cloudnative-pg/machinery/pkg/log" + jsonpatch "github.com/evanphx/json-patch/v5" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" + cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" @@ -131,6 +140,10 @@ func CreatePodEnvConfig(cluster apiv1.Cluster, podName string) EnvConfig { Name: "CLUSTER_NAME", Value: cluster.Name, }, + { + Name: "PSQL_HISTORY", + Value: path.Join(postgres.TemporaryDirectory, ".psql_history"), + }, { Name: "PGPORT", Value: strconv.Itoa(postgres.ServerPort), @@ -139,18 +152,32 @@ func CreatePodEnvConfig(cluster apiv1.Cluster, podName string) EnvConfig { Name: "PGHOST", Value: postgres.SocketDirectory, }, + { + Name: "TMPDIR", + Value: postgres.TemporaryDirectory, + }, }, EnvFrom: cluster.Spec.EnvFrom, } config.EnvVars = append(config.EnvVars, cluster.Spec.Env...) 
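PSQL_HISTORY and TMPDIR both land in the instance's temporary directory, keeping psql history and scratch files off the data volume. A standalone sketch of assembling these entries; the directory value here is an assumption, the real one comes from postgres.TemporaryDirectory:

package main

import (
	"fmt"
	"path"

	corev1 "k8s.io/api/core/v1"
)

// temporaryDirectory is a hypothetical value for postgres.TemporaryDirectory.
const temporaryDirectory = "/controller/tmp"

func extraEnv() []corev1.EnvVar {
	return []corev1.EnvVar{
		// Keep psql command history out of PGDATA.
		{Name: "PSQL_HISTORY", Value: path.Join(temporaryDirectory, ".psql_history")},
		// Redirect temporary files of the whole pod environment.
		{Name: "TMPDIR", Value: temporaryDirectory},
	}
}

func main() {
	fmt.Println(extraEnv())
}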
+	if configuration.Current.StandbyTCPUserTimeout != 0 {
+		config.EnvVars = append(
+			config.EnvVars,
+			corev1.EnvVar{
+				Name:  "CNPG_STANDBY_TCP_USER_TIMEOUT",
+				Value: strconv.Itoa(configuration.Current.StandbyTCPUserTimeout),
+			},
+		)
+	}
+
 	hashValue, _ := hash.ComputeHash(config)
 	config.Hash = hashValue
 	return config
 }
 
-// CreateClusterPodSpec computes the PodSpec corresponding to a cluster
+// createClusterPodSpec computes the PodSpec corresponding to a cluster
-func CreateClusterPodSpec(
+func createClusterPodSpec(
 	podName string,
 	cluster apiv1.Cluster,
 	envConfig EnvConfig,
@@ -184,22 +211,25 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable
 	containers := []corev1.Container{
 		{
 			Name:            PostgresContainerName,
-			Image:           cluster.GetImageName(),
+			Image:           cluster.Status.Image,
 			ImagePullPolicy: cluster.Spec.ImagePullPolicy,
 			Env:             envConfig.EnvVars,
 			EnvFrom:         envConfig.EnvFrom,
-			VolumeMounts:    createPostgresVolumeMounts(cluster),
+			VolumeMounts:    CreatePostgresVolumeMounts(cluster),
+			// This is the default startup probe, and can be overridden
+			// by the user configuration in cluster.spec.probes.startup
 			StartupProbe: &corev1.Probe{
-				FailureThreshold: getStartupProbeFailureThreshold(cluster.GetMaxStartDelay()),
-				PeriodSeconds:    StartupProbePeriod,
-				TimeoutSeconds:   5,
+				PeriodSeconds:  StartupProbePeriod,
+				TimeoutSeconds: 5,
 				ProbeHandler: corev1.ProbeHandler{
 					HTTPGet: &corev1.HTTPGetAction{
-						Path: url.PathHealth,
+						Path: url.PathStartup,
 						Port: intstr.FromInt32(url.StatusPort),
 					},
 				},
 			},
+			// This is the default readiness probe, and can be overridden
+			// by the user configuration in cluster.spec.probes.readiness
 			ReadinessProbe: &corev1.Probe{
 				TimeoutSeconds: 5,
 				PeriodSeconds:  ReadinessProbePeriod,
@@ -210,6 +240,8 @@
 				},
 			},
 		},
+		// This is the default liveness probe, and can be overridden
+		// by the user configuration in cluster.spec.probes.liveness
 		LivenessProbe: &corev1.Probe{
 			PeriodSeconds:  LivenessProbePeriod,
 			TimeoutSeconds: 5,
@@ -248,9 +280,9 @@
 	}
 
 	if enableHTTPS {
-		containers[0].StartupProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
-		containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
-		containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
+		containers[0].StartupProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS
+		containers[0].LivenessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS
+		containers[0].ReadinessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS
 		containers[0].Command = append(containers[0].Command, "--status-port-tls")
 	}
 
@@ -260,36 +292,49 @@
 	addManagerLoggingOptions(cluster, &containers[0])
 
-	// if user customizes the liveness probe timeout, we need to adjust the failure threshold
-	addLivenessProbeFailureThreshold(cluster, &containers[0])
+	// use the custom probe configuration if provided
+	ensureCustomProbesConfiguration(&cluster, &containers[0])
 
-	return containers
-}
+	// ensure a proper threshold is set
+	if containers[0].StartupProbe.FailureThreshold == 0 {
+		containers[0].StartupProbe.FailureThreshold = getFailureThreshold(
+			cluster.GetMaxStartDelay(),
+			containers[0].StartupProbe.PeriodSeconds,
+		)
+	}
 
-// adjust the liveness probe failure threshold based on the `spec.livenessProbeTimeout` value
-func addLivenessProbeFailureThreshold(cluster
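The threshold logic above generalizes the old startup-only helper: when no explicit failureThreshold is configured, it is derived as ceil(delay / period), floored at 1. A sketch with worked values; the 10-second probe periods are assumptions consistent with the tests further down:

package main

import (
	"fmt"
	"math"
)

// failureThreshold mirrors getFailureThreshold from the diff:
// FAILURE_THRESHOLD = ceil(delay / period), with a minimum of 1.
func failureThreshold(delay, period int32) int32 {
	if delay <= period {
		return 1
	}
	return int32(math.Ceil(float64(delay) / float64(period)))
}

func main() {
	fmt.Println(failureThreshold(109, 10)) // ceil(109/10) = 11
	fmt.Println(failureThreshold(31, 10))  // ceil(31/10) = 4
	fmt.Println(failureThreshold(5, 10))   // floored at 1
}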
apiv1.Cluster, container *corev1.Container) { - if cluster.Spec.LivenessProbeTimeout != nil { - timeout := *cluster.Spec.LivenessProbeTimeout - container.LivenessProbe.FailureThreshold = getLivenessProbeFailureThreshold(timeout) + if cluster.Spec.LivenessProbeTimeout != nil && containers[0].LivenessProbe.FailureThreshold == 0 { + containers[0].LivenessProbe.FailureThreshold = getFailureThreshold( + *cluster.Spec.LivenessProbeTimeout, + containers[0].LivenessProbe.PeriodSeconds, + ) } + + return containers } -// getStartupProbeFailureThreshold get the startup probe failure threshold -// FAILURE_THRESHOLD = ceil(startDelay / periodSeconds) and minimum value is 1 -func getStartupProbeFailureThreshold(startupDelay int32) int32 { - if startupDelay <= StartupProbePeriod { - return 1 +// ensureCustomProbesConfiguration applies the custom probe configuration +// if specified inside the cluster specification +func ensureCustomProbesConfiguration(cluster *apiv1.Cluster, container *corev1.Container) { + // No probes configuration + if cluster.Spec.Probes == nil { + return } - return int32(math.Ceil(float64(startupDelay) / float64(StartupProbePeriod))) + + // There's no need to check for nils here because a nil probe specification + // will result in no change in the Kubernetes probe. + cluster.Spec.Probes.Liveness.ApplyInto(container.LivenessProbe) + cluster.Spec.Probes.Readiness.ApplyInto(container.ReadinessProbe) + cluster.Spec.Probes.Startup.ApplyInto(container.StartupProbe) } -// getLivenessProbeFailureThreshold get the liveness probe failure threshold -// FAILURE_THRESHOLD = ceil(livenessTimeout / periodSeconds) and minimum value is 1 -func getLivenessProbeFailureThreshold(livenessTimeout int32) int32 { - if livenessTimeout <= LivenessProbePeriod { +// getFailureThreshold get the startup probe failure threshold +// FAILURE_THRESHOLD = ceil(startDelay / periodSeconds) and minimum value is 1 +func getFailureThreshold(startupDelay, period int32) int32 { + if startupDelay <= period { return 1 } - return int32(math.Ceil(float64(livenessTimeout) / float64(LivenessProbePeriod))) + return int32(math.Ceil(float64(startupDelay) / float64(period))) } // CreateAffinitySection creates the affinity sections for Pods, given the configuration @@ -405,15 +450,63 @@ func CreatePodSecurityContext(seccompProfile *corev1.SeccompProfile, user, group } } -// PodWithExistingStorage create a new instance with an existing storage -func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod { +// NewInstance creates a new instance Pod with the plugin patches applied +func NewInstance( + ctx context.Context, + cluster apiv1.Cluster, + nodeSerial int, + // tlsEnabled TODO: remove when we drop the support for the instances created without TLS + tlsEnabled bool, +) (*corev1.Pod, error) { + contextLogger := log.FromContext(ctx).WithName("new_instance") + + pod, err := buildInstance(cluster, nodeSerial, tlsEnabled) + if err != nil { + return nil, err + } + + defer func() { + if pod == nil { + return + } + if podSpecMarshaled, marshalErr := json.Marshal(pod.Spec); marshalErr == nil { + pod.Annotations[utils.PodSpecAnnotationName] = string(podSpecMarshaled) + } + }() + + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) + if pluginClient == nil { + contextLogger.Trace("skipping NewInstance, cannot find the plugin client inside the context") + return pod, nil + } + + contextLogger.Trace("correctly loaded the plugin client for instance evaluation") + + podClientObject, err := 
pluginClient.LifecycleHook(ctx, plugin.OperationVerbEvaluate, &cluster, pod) + if err != nil { + return nil, fmt.Errorf("while invoking the lifecycle instance evaluation hook: %w", err) + } + + var ok bool + pod, ok = podClientObject.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("while casting the clientObject to the pod type") + } + + return pod, nil +} + +func buildInstance( + cluster apiv1.Cluster, + nodeSerial int, + tlsEnabled bool, +) (*corev1.Pod, error) { podName := GetInstanceName(cluster.Name, nodeSerial) gracePeriod := int64(cluster.GetMaxStopDelay()) envConfig := CreatePodEnvConfig(cluster, podName) - tlsEnabled := true - podSpec := CreateClusterPodSpec(podName, cluster, envConfig, gracePeriod, tlsEnabled) + podSpec := createClusterPodSpec(podName, cluster, envConfig, gracePeriod, tlsEnabled) pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -432,10 +525,6 @@ func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod { Spec: podSpec, } - if podSpecMarshaled, err := json.Marshal(podSpec); err == nil { - pod.Annotations[utils.PodSpecAnnotationName] = string(podSpecMarshaled) - } - if cluster.Spec.PriorityClassName != "" { pod.Spec.PriorityClassName = cluster.Spec.PriorityClassName } @@ -447,7 +536,28 @@ func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod { if utils.IsAnnotationAppArmorPresent(&pod.Spec, cluster.Annotations) { utils.AnnotateAppArmor(&pod.ObjectMeta, &pod.Spec, cluster.Annotations) } - return pod + + if jsonPatch := cluster.Annotations[utils.PodPatchAnnotationName]; jsonPatch != "" { + serializedObject, err := json.Marshal(pod) + if err != nil { + return nil, fmt.Errorf("while serializing pod to JSON: %w", err) + } + patch, err := jsonpatch.DecodePatch([]byte(jsonPatch)) + if err != nil { + return nil, fmt.Errorf("while decoding JSON patch from annotation: %w", err) + } + + serializedObject, err = patch.Apply(serializedObject) + if err != nil { + return nil, fmt.Errorf("while applying JSON patch from annotation: %w", err) + } + + if err = json.Unmarshal(serializedObject, pod); err != nil { + return nil, fmt.Errorf("while deserializing pod to JSON: %w", err) + } + } + + return pod, nil } // GetInstanceName returns a string indicating the instance name diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go index f9b8258ac6..ac2eba31d2 100644 --- a/pkg/specs/pods_test.go +++ b/pkg/specs/pods_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -26,6 +29,7 @@ import ( v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -332,6 +336,10 @@ var _ = Describe("EnvConfig", func() { Name: "CLUSTER_NAME", Value: cluster.Name, }, + { + Name: "PSQL_HISTORY", + Value: postgres.TemporaryDirectory + "/.psql_history", + }, { Name: "PGPORT", Value: strconv.Itoa(postgres.ServerPort), @@ -340,6 +348,10 @@ var _ = Describe("EnvConfig", func() { Name: "PGHOST", Value: postgres.SocketDirectory, }, + { + Name: "TMPDIR", + Value: postgres.TemporaryDirectory, + }, { Name: "TEST_ENV", Value: "EXPECTED", @@ -385,6 +397,10 @@ var _ = Describe("EnvConfig", func() { Name: "PGHOST", Value: postgres.SocketDirectory, }, + { + Name: "TMPDIR", + Value: postgres.TemporaryDirectory, + }, { Name: "TEST_ENV", Value: "UNEXPECTED", @@ -905,20 +921,50 @@ var _ = Describe("PodSpec drift detection", func() { var _ = Describe("Compute startup probe failure threshold", func() { It("should take the minimum value 1", func() { - Expect(getStartupProbeFailureThreshold(5)).To(BeNumerically("==", 1)) + Expect(getFailureThreshold(5, StartupProbePeriod)).To(BeNumerically("==", 1)) + Expect(getFailureThreshold(5, LivenessProbePeriod)).To(BeNumerically("==", 1)) }) It("should take the value from 'startDelay / periodSeconds'", func() { - Expect(getStartupProbeFailureThreshold(109)).To(BeNumerically("==", 11)) + Expect(getFailureThreshold(109, StartupProbePeriod)).To(BeNumerically("==", 11)) + Expect(getFailureThreshold(31, LivenessProbePeriod)).To(BeNumerically("==", 4)) }) }) -var _ = Describe("Compute liveness probe failure threshold", func() { - It("should take the minimum value 1", func() { - Expect(getLivenessProbeFailureThreshold(5)).To(BeNumerically("==", 1)) +var _ = Describe("NewInstance", func() { + It("applies JSON patch from annotation", func(ctx SpecContext) { + cluster := v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + Annotations: map[string]string{ + utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/spec/containers/0/image", "value": "new-image:latest"}]`, // nolint: lll + }, + }, + Status: v1.ClusterStatus{ + Image: "test", + }, + } + + pod, err := NewInstance(ctx, cluster, 1, true) + Expect(err).NotTo(HaveOccurred()) + Expect(pod).NotTo(BeNil()) + Expect(pod.Spec.Containers[0].Image).To(Equal("new-image:latest")) }) - It("should take the value from 'startDelay / periodSeconds'", func() { - Expect(getLivenessProbeFailureThreshold(31)).To(BeNumerically("==", 4)) + It("returns error if JSON patch is invalid", func(ctx SpecContext) { + cluster := v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + Annotations: map[string]string{ + utils.PodPatchAnnotationName: `invalid-json-patch`, + }, + }, + } + + _, err := NewInstance(ctx, cluster, 1, true) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("while decoding JSON patch from annotation")) }) }) diff --git a/pkg/specs/podspec_diff.go b/pkg/specs/podspec_diff.go index 54c9328d8a..8317aed787 100644 --- a/pkg/specs/podspec_diff.go +++ b/pkg/specs/podspec_diff.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -26,8 +29,6 @@ import ( // ComparePodSpecs compares two pod specs, returns true iff they are equivalent, and // if they are not, points out the first discrepancy. -// This function matches CreateClusterPodSpec, specifically it looks in more detail -// and ignores reordering of volume mounts and containers func ComparePodSpecs( currentPodSpec, targetPodSpec corev1.PodSpec, ) (bool, string) { @@ -39,7 +40,21 @@ func ComparePodSpecs( return compareContainers(currentPodSpec.Containers, targetPodSpec.Containers) }, "init-containers": func() (bool, string) { - return compareContainers(currentPodSpec.InitContainers, targetPodSpec.InitContainers) + extractContainersForComparison := func(passedContainers []corev1.Container) []corev1.Container { + var containers []corev1.Container + for _, container := range passedContainers { + if container.Name == BootstrapControllerContainerName { + // ignore the bootstrap controller init container. We handle it inside checkPodSpecIsOutdated. + continue + } + containers = append(containers, container) + } + return containers + } + return compareContainers( + extractContainersForComparison(currentPodSpec.InitContainers), + extractContainersForComparison(targetPodSpec.InitContainers), + ) }, } @@ -77,8 +92,9 @@ func ComparePodSpecs( return currentPodSpec.Hostname == targetPodSpec.Hostname }, "termination-grace-period": func() bool { - return currentPodSpec.TerminationGracePeriodSeconds == nil && targetPodSpec.TerminationGracePeriodSeconds == nil || - *currentPodSpec.TerminationGracePeriodSeconds == *targetPodSpec.TerminationGracePeriodSeconds + return (currentPodSpec.TerminationGracePeriodSeconds == nil && targetPodSpec.TerminationGracePeriodSeconds == nil) || + (currentPodSpec.TerminationGracePeriodSeconds != nil && targetPodSpec.TerminationGracePeriodSeconds != nil && + *currentPodSpec.TerminationGracePeriodSeconds == *targetPodSpec.TerminationGracePeriodSeconds) }, } @@ -176,12 +192,15 @@ func doContainersMatch(currentContainer, targetContainer corev1.Container) (bool "liveness-probe": func() bool { return reflect.DeepEqual(currentContainer.LivenessProbe, targetContainer.LivenessProbe) }, + "startup-probe": func() bool { + return reflect.DeepEqual(currentContainer.StartupProbe, targetContainer.StartupProbe) + }, "command": func() bool { return reflect.DeepEqual(currentContainer.Command, targetContainer.Command) }, "resources": func() bool { // semantic equality will compare the two objects semantically, not only numbers - return equality.Semantic.Equalities.DeepEqual( + return equality.Semantic.DeepEqual( currentContainer.Resources, targetContainer.Resources, ) diff --git a/pkg/specs/podspec_diff_test.go b/pkg/specs/podspec_diff_test.go index 5869d68221..8ac5f50f3c 100644 --- a/pkg/specs/podspec_diff_test.go +++ b/pkg/specs/podspec_diff_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
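ComparePodSpecs drives a map of named predicates and reports the first failing name as the discrepancy; the termination-grace-period fix above also makes the pointer comparison nil-safe. A compact sketch of both ideas:

package main

import "fmt"

// equalInt64Ptr mirrors the fixed termination-grace-period check:
// equal when both are nil, or both non-nil with the same value.
func equalInt64Ptr(a, b *int64) bool {
	if a == nil || b == nil {
		return a == nil && b == nil
	}
	return *a == *b
}

// firstMismatch returns the name of the first failing check, if any.
func firstMismatch(checks map[string]func() bool) (bool, string) {
	for name, check := range checks {
		if !check() {
			return false, name
		}
	}
	return true, ""
}

func main() {
	current, target := int64(30), int64(60)
	ok, name := firstMismatch(map[string]func() bool{
		"termination-grace-period": func() bool { return equalInt64Ptr(&current, &target) },
	})
	fmt.Println(ok, name) // false termination-grace-period
}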
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
 
 import (
+	corev1 "k8s.io/api/core/v1"
+
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
@@ -37,4 +42,55 @@ var _ = Describe("PodSpecDiff", func() {
 	It("returns false for empty volume name", func() {
 		Expect(shouldIgnoreCurrentVolume("")).To(BeFalse())
 	})
+
+	It("returns false when the startup probes do not match and true otherwise", func() {
+		containerPre := corev1.Container{
+			StartupProbe: &corev1.Probe{
+				TimeoutSeconds: 23,
+			},
+		}
+		containerPost := corev1.Container{
+			StartupProbe: &corev1.Probe{
+				TimeoutSeconds: 24,
+			},
+		}
+		Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue())
+		status, diff := doContainersMatch(containerPre, containerPost)
+		Expect(status).To(BeFalse())
+		Expect(diff).To(Equal("startup-probe"))
+	})
+
+	It("returns false when the liveness probes do not match and true otherwise", func() {
+		containerPre := corev1.Container{
+			LivenessProbe: &corev1.Probe{
+				InitialDelaySeconds: 23,
+			},
+		}
+		containerPost := corev1.Container{
+			LivenessProbe: &corev1.Probe{
+				InitialDelaySeconds: 24,
+			},
+		}
+		Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue())
+		status, diff := doContainersMatch(containerPre, containerPost)
+		Expect(status).To(BeFalse())
+		Expect(diff).To(Equal("liveness-probe"))
+	})
+
+	It("returns false when the readiness probes do not match and true otherwise", func() {
+		containerPre := corev1.Container{
+			ReadinessProbe: &corev1.Probe{
+				SuccessThreshold: 23,
+			},
+		}
+		containerPost := corev1.Container{
+			ReadinessProbe: &corev1.Probe{
+				SuccessThreshold: 24,
+			},
+		}
+		Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue())
+		status, diff := doContainersMatch(containerPre, containerPost)
+		Expect(status).To(BeFalse())
+		Expect(diff).To(Equal("readiness-probe"))
+	})
 })
diff --git a/pkg/specs/rolebinding.go b/pkg/specs/rolebinding.go
index d532e2f61c..d5968cec6f 100644
--- a/pkg/specs/rolebinding.go
+++ b/pkg/specs/rolebinding.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/rolebinding_test.go b/pkg/specs/rolebinding_test.go
index c101194b19..3ba0cfca00 100644
--- a/pkg/specs/rolebinding_test.go
+++ b/pkg/specs/rolebinding_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package specs diff --git a/pkg/specs/roles.go b/pkg/specs/roles.go index e48a3b6ad4..4567307d5c 100644 --- a/pkg/specs/roles.go +++ b/pkg/specs/roles.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -19,11 +22,11 @@ package specs import ( "slices" + "github.com/cloudnative-pg/machinery/pkg/stringset" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) // CreateRole create a role with the permissions needed by the instance manager @@ -154,6 +157,95 @@ func CreateRole(cluster apiv1.Cluster, backupOrigin *apiv1.Backup) rbacv1.Role { "update", }, }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "publications", + }, + Verbs: []string{ + "get", + "update", + "list", + "watch", + }, + ResourceNames: []string{}, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "publications/status", + }, + Verbs: []string{ + "get", + "patch", + "update", + }, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "subscriptions", + }, + Verbs: []string{ + "get", + "update", + "list", + "watch", + }, + ResourceNames: []string{}, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "subscriptions/status", + }, + Verbs: []string{ + "get", + "patch", + "update", + }, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "failoverquorums", + }, + Verbs: []string{ + "get", + "list", + "watch", + }, + ResourceNames: []string{ + cluster.Name, + }, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "failoverquorums/status", + }, + Verbs: []string{ + "get", + "patch", + "update", + "watch", + }, + ResourceNames: []string{ + cluster.Name, + }, + }, } return rbacv1.Role{ @@ -237,13 +329,13 @@ func externalClusterSecrets(cluster apiv1.Cluster) []string { if barmanObjStore := server.BarmanObjectStore; barmanObjStore != nil { result = append( result, - s3CredentialsSecrets(barmanObjStore.BarmanCredentials.AWS)...) + s3CredentialsSecrets(barmanObjStore.AWS)...) result = append( result, - azureCredentialsSecrets(barmanObjStore.BarmanCredentials.Azure)...) + azureCredentialsSecrets(barmanObjStore.Azure)...) result = append( result, - googleCredentialsSecrets(barmanObjStore.BarmanCredentials.Google)...) + googleCredentialsSecrets(barmanObjStore.Google)...) if barmanObjStore.EndpointCA != nil { result = append(result, barmanObjStore.EndpointCA.Name) } @@ -260,13 +352,13 @@ func backupSecrets(cluster apiv1.Cluster, backupOrigin *apiv1.Backup) []string { if cluster.Spec.Backup != nil && cluster.Spec.Backup.BarmanObjectStore != nil { result = append( result, - s3CredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.BarmanCredentials.AWS)...) 
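The new rules extend the instance-manager role to publications, subscriptions, and failoverquorums, with the quorum rules pinned to the cluster's own object through ResourceNames. A sketch of one such name-scoped rule:

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func quorumStatusRule(clusterName string) rbacv1.PolicyRule {
	return rbacv1.PolicyRule{
		APIGroups: []string{"postgresql.cnpg.io"},
		Resources: []string{"failoverquorums/status"},
		Verbs:     []string{"get", "patch", "update", "watch"},
		// ResourceNames narrows the grant to this cluster's object only,
		// so one instance cannot touch another cluster's quorum status.
		ResourceNames: []string{clusterName},
	}
}

func main() {
	fmt.Printf("%+v\n", quorumStatusRule("cluster-example"))
}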
+ s3CredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.AWS)...) result = append( result, - azureCredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.BarmanCredentials.Azure)...) + azureCredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.Azure)...) result = append( result, - googleCredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.BarmanCredentials.Google)...) + googleCredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.Google)...) } // Secrets needed by Barman, if set @@ -279,13 +371,13 @@ func backupSecrets(cluster apiv1.Cluster, backupOrigin *apiv1.Backup) []string { if backupOrigin != nil { result = append( result, - s3CredentialsSecrets(backupOrigin.Status.BarmanCredentials.AWS)...) + s3CredentialsSecrets(backupOrigin.Status.AWS)...) result = append( result, - azureCredentialsSecrets(backupOrigin.Status.BarmanCredentials.Azure)...) + azureCredentialsSecrets(backupOrigin.Status.Azure)...) result = append( result, - googleCredentialsSecrets(backupOrigin.Status.BarmanCredentials.Google)...) + googleCredentialsSecrets(backupOrigin.Status.Google)...) } return result diff --git a/pkg/specs/roles_test.go b/pkg/specs/roles_test.go index 3753a66154..8808163d46 100644 --- a/pkg/specs/roles_test.go +++ b/pkg/specs/roles_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -165,7 +168,7 @@ var _ = Describe("Roles", func() { serviceAccount := CreateRole(cluster, nil) Expect(serviceAccount.Name).To(Equal(cluster.Name)) Expect(serviceAccount.Namespace).To(Equal(cluster.Namespace)) - Expect(serviceAccount.Rules).To(HaveLen(9)) + Expect(serviceAccount.Rules).To(HaveLen(15)) }) It("should contain every secret of the origin backup and backup configuration of every external cluster", func() { diff --git a/pkg/specs/secrets.go b/pkg/specs/secrets.go index 2e66a497e7..52888fc6f2 100644 --- a/pkg/specs/secrets.go +++ b/pkg/specs/secrets.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -23,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -35,15 +39,38 @@ func CreateSecret( dbname string, username string, password string, + usertype utils.UserType, ) *corev1.Secret { - uriBuilder := newConnectionStringBuilder(hostname, dbname, username, password, namespace) + hostWithNamespace := fmt.Sprintf("%s.%s:%d", hostname, namespace, postgres.ServerPort) + hostWithFQDN := fmt.Sprintf( + "%s.%s.svc.%s:%d", + hostname, + namespace, + configuration.Current.KubernetesClusterDomain, + postgres.ServerPort, + ) + + namespacedBuilder := &connectionStringBuilder{ + host: hostWithNamespace, + dbname: dbname, + username: username, + password: password, + } + + fqdnBuilder := &connectionStringBuilder{ + host: hostWithFQDN, + dbname: dbname, + username: username, + password: password, + } return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Labels: map[string]string{ - utils.WatchedLabelName: "true", + utils.UserTypeLabelName: string(usertype), + utils.WatchedLabelName: "true", }, }, Type: corev1.SecretTypeBasicAuth, @@ -61,28 +88,19 @@ func CreateSecret( dbname, username, password), - "uri": uriBuilder.buildPostgres(), - "jdbc-uri": uriBuilder.buildJdbc(), + "uri": namespacedBuilder.buildPostgres(), + "jdbc-uri": namespacedBuilder.buildJdbc(), + "fqdn-uri": fqdnBuilder.buildPostgres(), + "fqdn-jdbc-uri": fqdnBuilder.buildJdbc(), }, } } type connectionStringBuilder struct { - host string - dbname string - username string - password string - namespace string -} - -func newConnectionStringBuilder(hostname, dbname, username, password, namespace string) *connectionStringBuilder { - return &connectionStringBuilder{ - host: fmt.Sprintf("%s.%s:%d", hostname, namespace, postgres.ServerPort), - dbname: dbname, - username: username, - password: password, - namespace: namespace, - } + host string + dbname string + username string + password string } func (c connectionStringBuilder) buildPostgres() string { diff --git a/pkg/specs/secrets_test.go b/pkg/specs/secrets_test.go index 0807c90817..8693fadbf9 100644 --- a/pkg/specs/secrets_test.go +++ b/pkg/specs/secrets_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs import ( + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -24,7 +29,7 @@ import ( var _ = Describe("Secret creation", func() { It("create a secret with the right user and password", func() { secret := CreateSecret("name", "namespace", - "thishost", "thisdb", "thisuser", "thispassword") + "thishost", "thisdb", "thisuser", "thispassword", utils.UserTypeApp) Expect(secret.Name).To(Equal("name")) Expect(secret.Namespace).To(Equal("namespace")) Expect(secret.StringData["username"]).To(Equal("thisuser")) @@ -39,5 +44,15 @@ var _ = Describe("Secret creation", func() { Expect(secret.StringData["jdbc-uri"]).To( Equal("jdbc:postgresql://thishost.namespace:5432/thisdb?password=thispassword&user=thisuser"), ) + + Expect(secret.StringData["fqdn-uri"]).To( + Equal("postgresql://thisuser:thispassword@thishost.namespace.svc.cluster.local:5432/thisdb"), + ) + Expect(secret.StringData["fqdn-jdbc-uri"]).To( + Equal("jdbc:postgresql://thishost.namespace.svc.cluster.local:5432/thisdb?password=thispassword&user=thisuser"), + ) + + Expect(secret.Labels).To( + HaveKeyWithValue(utils.UserTypeLabelName, string(utils.UserTypeApp))) }) }) diff --git a/pkg/specs/serviceaccount.go b/pkg/specs/serviceaccount.go index 0862e308ec..3e0758edbe 100644 --- a/pkg/specs/serviceaccount.go +++ b/pkg/specs/serviceaccount.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs diff --git a/pkg/specs/serviceaccount_test.go b/pkg/specs/serviceaccount_test.go index 066df1107b..15f16abd28 100644 --- a/pkg/specs/serviceaccount_test.go +++ b/pkg/specs/serviceaccount_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs diff --git a/pkg/specs/services.go b/pkg/specs/services.go index e7978e6c6f..28b58e3ab9 100644 --- a/pkg/specs/services.go +++ b/pkg/specs/services.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -155,7 +158,8 @@ func BuildManagedServices(cluster apiv1.Cluster) ([]corev1.Service, error) { SetSelectors(defaultService.Spec.Selector) for idx := range defaultService.Spec.Ports { - builder = builder.WithServicePort(&defaultService.Spec.Ports[idx]) + // we preserve the user settings over the default configuration, issue: #6389 + builder = builder.WithServicePortNoOverwrite(&defaultService.Spec.Ports[idx]) } for key, value := range defaultService.Labels { diff --git a/pkg/specs/services_test.go b/pkg/specs/services_test.go index a7c0922b98..2fc0ff0cc6 100644 --- a/pkg/specs/services_test.go +++ b/pkg/specs/services_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -19,8 +22,10 @@ package specs import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" @@ -33,6 +38,12 @@ var _ = Describe("Services specification", func() { Name: "clustername", }, } + expectedPort := corev1.ServicePort{ + Name: PostgresContainerName, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt32(postgres.ServerPort), + Port: postgres.ServerPort, + } It("create a configured -any service", func() { service := CreateClusterAnyService(postgresql) @@ -40,6 +51,8 @@ var _ = Describe("Services specification", func() { Expect(service.Spec.PublishNotReadyAddresses).To(BeTrue()) Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername")) Expect(service.Spec.Selector[utils.PodRoleLabelName]).To(Equal(string(utils.PodRoleInstance))) + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports).To(ContainElement(expectedPort)) }) It("create a configured -r service", func() { @@ -48,6 +61,8 @@ var _ = Describe("Services specification", func() { Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse()) Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername")) Expect(service.Spec.Selector[utils.PodRoleLabelName]).To(Equal(string(utils.PodRoleInstance))) + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports).To(ContainElement(expectedPort)) }) It("create a configured -ro service", func() { @@ -56,6 +71,8 @@ var _ = Describe("Services specification", func() { Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse()) Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername")) Expect(service.Spec.Selector[utils.ClusterInstanceRoleLabelName]).To(Equal(ClusterRoleLabelReplica)) + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports).To(ContainElement(expectedPort)) }) It("create a configured -rw service", func() { @@ -64,6 +81,8 @@ var _ = Describe("Services specification", func() { Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse()) 
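WithServicePortNoOverwrite (per issue #6389) only adds the default port when the user's service template does not already define a conflicting one, so user settings such as an explicit NodePort survive. A sketch of that merge rule, under the assumption that conflicts are detected by name or port number:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// addPortNoOverwrite appends the default port only when no existing port
// collides with it; otherwise the user's definition wins unchanged.
func addPortNoOverwrite(ports []corev1.ServicePort, def corev1.ServicePort) []corev1.ServicePort {
	for _, p := range ports {
		if p.Name == def.Name || p.Port == def.Port {
			return ports
		}
	}
	return append(ports, def)
}

func main() {
	user := []corev1.ServicePort{{Name: "postgres", Port: 5432, NodePort: 5533}}
	merged := addPortNoOverwrite(user, corev1.ServicePort{Name: "postgres", Port: 5432})
	fmt.Println(merged[0].NodePort) // 5533 is preserved
}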
Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername")) Expect(service.Spec.Selector[utils.ClusterInstanceRoleLabelName]).To(Equal(ClusterRoleLabelPrimary)) + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports).To(ContainElement(expectedPort)) }) }) @@ -136,6 +155,30 @@ var _ = Describe("BuildManagedServices", func() { Expect(services[0].ObjectMeta.Labels).To(HaveKeyWithValue(utils.IsManagedLabelName, "true")) Expect(services[0].ObjectMeta.Labels).To(HaveKeyWithValue("test-label", "test-value")) Expect(services[0].ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation", "test-value")) + Expect(services[0].Spec.Ports).To(ContainElement(corev1.ServicePort{ + Name: PostgresContainerName, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt32(postgres.ServerPort), + Port: postgres.ServerPort, + NodePort: 0, + })) + }) + + It("should not overwrite the user specified service port with the default one", func() { + cluster.Spec.Managed.Services.Additional[0].ServiceTemplate.Spec.Ports = []corev1.ServicePort{ + { + Name: PostgresContainerName, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt32(postgres.ServerPort), + Port: postgres.ServerPort, + NodePort: 5533, + }, + } + services, err := BuildManagedServices(cluster) + Expect(err).NotTo(HaveOccurred()) + Expect(services).NotTo(BeNil()) + Expect(services).To(HaveLen(1)) + Expect(services[0].Spec.Ports[0].NodePort).To(Equal(int32(5533))) }) }) }) diff --git a/pkg/specs/suite_test.go b/pkg/specs/suite_test.go index 32e32787f5..ce425b74f5 100644 --- a/pkg/specs/suite_test.go +++ b/pkg/specs/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs diff --git a/pkg/specs/volumes.go b/pkg/specs/volumes.go index ffa755054f..f77ff6c023 100644 --- a/pkg/specs/volumes.go +++ b/pkg/specs/volumes.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -19,6 +22,7 @@ package specs import ( "fmt" "path" + "path/filepath" "sort" "strings" @@ -144,6 +148,9 @@ func createPostgresVolumes(cluster *apiv1.Cluster, podName string) []corev1.Volu if cluster.ShouldCreateProjectedVolume() { result = append(result, createProjectedVolume(cluster)) } + + result = append(result, createExtensionVolumes(cluster)...) 
+ return result } @@ -219,7 +226,9 @@ func createVolumesAndVolumeMountsForSQLRefs( return volumes, volumeMounts } -func createPostgresVolumeMounts(cluster apiv1.Cluster) []corev1.VolumeMount { +// CreatePostgresVolumeMounts creates the volume mounts that are used +// by PostgreSQL Pods +func CreatePostgresVolumeMounts(cluster apiv1.Cluster) []corev1.VolumeMount { volumeMounts := []corev1.VolumeMount{ { Name: "pgdata", @@ -270,6 +279,9 @@ func createPostgresVolumeMounts(cluster apiv1.Cluster) []corev1.VolumeMount { ) } } + + volumeMounts = append(volumeMounts, createExtensionVolumeMounts(&cluster)...) + return volumeMounts } @@ -308,3 +320,33 @@ func createProjectedVolume(cluster *apiv1.Cluster) corev1.Volume { }, } } + +func createExtensionVolumes(cluster *apiv1.Cluster) []corev1.Volume { + extensionVolumes := make([]corev1.Volume, 0, len(cluster.Spec.PostgresConfiguration.Extensions)) + for _, extension := range cluster.Spec.PostgresConfiguration.Extensions { + extensionVolumes = append(extensionVolumes, + corev1.Volume{ + Name: extension.Name, + VolumeSource: corev1.VolumeSource{ + Image: &extension.ImageVolumeSource, + }, + }, + ) + } + + return extensionVolumes +} + +func createExtensionVolumeMounts(cluster *apiv1.Cluster) []corev1.VolumeMount { + extensionVolumeMounts := make([]corev1.VolumeMount, 0, len(cluster.Spec.PostgresConfiguration.Extensions)) + for _, extension := range cluster.Spec.PostgresConfiguration.Extensions { + extensionVolumeMounts = append(extensionVolumeMounts, + corev1.VolumeMount{ + Name: extension.Name, + MountPath: filepath.Join(postgres.ExtensionsBaseDirectory, extension.Name), + }, + ) + } + + return extensionVolumeMounts +} diff --git a/pkg/specs/volumes_test.go b/pkg/specs/volumes_test.go index 97cd88844e..1cce8d6917 100644 --- a/pkg/specs/volumes_test.go +++ b/pkg/specs/volumes_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package specs @@ -19,9 +22,11 @@ package specs import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -295,7 +300,7 @@ var _ = Describe("test createVolumesAndVolumeMountsForSQLRefs", func() { var _ = DescribeTable("test creation of volume mounts", func(cluster apiv1.Cluster, mounts []corev1.VolumeMount) { - mts := createPostgresVolumeMounts(cluster) + mts := CreatePostgresVolumeMounts(cluster) Expect(mts).NotTo(BeEmpty()) for _, mt := range mounts { Expect(mts).To(ContainElement(mt)) @@ -518,3 +523,78 @@ var _ = Describe("createEphemeralVolume", func() { Expect(*ephemeralVolume.VolumeSource.EmptyDir.SizeLimit).To(Equal(quantity)) }) }) + +var _ = Describe("ImageVolume Extensions", func() { + var cluster apiv1.Cluster + + BeforeEach(func() { + cluster = apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "foo", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "foo:dev", + }, + }, + { + Name: "bar", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "bar:dev", + }, + }, + }, + }, + }, + } + }) + + Context("createExtensionVolumes", func() { + When("Extensions are disabled", func() { + It("shouldn't create Volumes", func() { + cluster.Spec.PostgresConfiguration.Extensions = []apiv1.ExtensionConfiguration{} + extensionVolumes := createExtensionVolumes(&cluster) + Expect(extensionVolumes).To(BeEmpty()) + }) + }) + When("Extensions are enabled", func() { + It("should create a Volume for each Extension", func() { + extensionVolumes := createExtensionVolumes(&cluster) + Expect(len(extensionVolumes)).To(BeEquivalentTo(2)) + Expect(extensionVolumes[0].Name).To(Equal("foo")) + Expect(extensionVolumes[0].VolumeSource.Image.Reference).To(Equal("foo:dev")) + Expect(extensionVolumes[1].Name).To(Equal("bar")) + Expect(extensionVolumes[1].VolumeSource.Image.Reference).To(Equal("bar:dev")) + }) + }) + }) + + Context("createExtensionVolumeMounts", func() { + When("Extensions are disabled", func() { + It("shouldn't create VolumeMounts", func() { + cluster.Spec.PostgresConfiguration.Extensions = []apiv1.ExtensionConfiguration{} + extensionVolumeMounts := createExtensionVolumeMounts(&cluster) + Expect(extensionVolumeMounts).To(BeEmpty()) + }) + }) + When("Extensions are enabled", func() { + It("should create a VolumeMount for each Extension", func() { + const ( + fooMountPath = postgres.ExtensionsBaseDirectory + "/foo" + barMountPath = postgres.ExtensionsBaseDirectory + "/bar" + ) + extensionVolumeMounts := createExtensionVolumeMounts(&cluster) + Expect(len(extensionVolumeMounts)).To(BeEquivalentTo(2)) + Expect(extensionVolumeMounts[0].Name).To(Equal("foo")) + Expect(extensionVolumeMounts[0].MountPath).To(Equal(fooMountPath)) + Expect(extensionVolumeMounts[1].Name).To(Equal("bar")) + Expect(extensionVolumeMounts[1].MountPath).To(Equal(barMountPath)) + }) + }) + }) +}) diff --git a/pkg/stringset/stringset.go b/pkg/stringset/stringset.go deleted file mode 100644 index f5678ec4a0..0000000000 --- a/pkg/stringset/stringset.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package stringset implements a basic set of strings -package stringset - -import ( - "slices" -) - -// Data represent a set of strings -type Data struct { - innerMap map[string]struct{} -} - -// New create a new empty set of strings -func New() *Data { - return &Data{ - innerMap: make(map[string]struct{}), - } -} - -// From create a empty set of strings given -// a slice of strings -func From(strings []string) *Data { - result := New() - for _, value := range strings { - result.Put(value) - } - return result -} - -// FromKeys create a string set from the -// keys of a map -func FromKeys[T any](v map[string]T) *Data { - result := New() - for key := range v { - result.Put(key) - } - return result -} - -// Put a string in the set -func (set *Data) Put(key string) { - set.innerMap[key] = struct{}{} -} - -// Delete deletes a string from the set. If the string doesn't exist -// this is a no-op -func (set *Data) Delete(key string) { - delete(set.innerMap, key) -} - -// Has checks if a string is in the set or not -func (set *Data) Has(key string) bool { - _, ok := set.innerMap[key] - return ok -} - -// Len returns the map of the set -func (set *Data) Len() int { - return len(set.innerMap) -} - -// ToList returns the strings contained in this set as -// a string slice -func (set *Data) ToList() (result []string) { - result = make([]string, 0, len(set.innerMap)) - for key := range set.innerMap { - result = append(result, key) - } - return -} - -// ToSortedList returns the string container in this set -// as a sorted string slice -func (set *Data) ToSortedList() []string { - result := set.ToList() - slices.Sort(result) - return result -} - -// Eq compares two string sets for equality -func (set *Data) Eq(other *Data) bool { - if set == nil || other == nil { - return false - } - - if set.Len() != other.Len() { - return false - } - - for key := range set.innerMap { - if !other.Has(key) { - return false - } - } - - return true -} diff --git a/pkg/stringset/stringset_test.go b/pkg/stringset/stringset_test.go deleted file mode 100644 index abf6e5548b..0000000000 --- a/pkg/stringset/stringset_test.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package stringset - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("String set", func() { - It("starts as an empty set", func() { - Expect(New().Len()).To(Equal(0)) - }) - - It("starts with a list of strings", func() { - Expect(From([]string{"one", "two"}).Len()).To(Equal(2)) - Expect(From([]string{"one", "two", "two"}).Len()).To(Equal(2)) - }) - - It("store string keys", func() { - set := New() - Expect(set.Has("test")).To(BeFalse()) - Expect(set.Has("test2")).To(BeFalse()) - - set.Put("test") - Expect(set.Has("test")).To(BeTrue()) - Expect(set.Has("test2")).To(BeFalse()) - }) - - It("removes string keys", func() { - set := From([]string{"one", "two"}) - set.Delete("one") - Expect(set.ToList()).To(Equal([]string{"two"})) - }) - - It("constructs a string slice given a set", func() { - Expect(From([]string{"one", "two"}).ToList()).To(ContainElements("one", "two")) - }) - - It("compares two string set for equality", func() { - Expect(From([]string{"one", "two"}).Eq(From([]string{"one", "two"}))).To(BeTrue()) - Expect(From([]string{"one", "two"}).Eq(From([]string{"two", "three"}))).To(BeFalse()) - Expect(From([]string{"one", "two"}).Eq(From([]string{"one", "two", "three"}))).To(BeFalse()) - Expect(From([]string{"one", "two", "three"}).Eq(From([]string{"one", "two"}))).To(BeFalse()) - }) - - It("constructs a sorted string slice given a set", func() { - Expect(From([]string{"one", "two", "three", "four"}).ToSortedList()).To( - HaveExactElements("four", "one", "three", "two")) - Expect(New().ToList()).To(BeEmpty()) - }) - - It("constructs a string set from a map having string as keys", func() { - Expect(FromKeys(map[string]int{ - "one": 1, - "two": 2, - "three": 3, - }).ToSortedList()).To( - HaveExactElements("one", "three", "two"), - ) - }) -}) diff --git a/pkg/system/compatibility/darwin.go b/pkg/system/compatibility/darwin.go index 83efd17ac3..271b27a80b 100644 --- a/pkg/system/compatibility/darwin.go +++ b/pkg/system/compatibility/darwin.go @@ -2,7 +2,8 @@ // +build darwin /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,12 +16,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -// Package compatibility provides a layer to cross-compile with other OS than Linux package compatibility -// SetCoredumpFilter for Windows compatibility +// SetCoredumpFilter for darwin compatibility func SetCoredumpFilter(_ string) error { return nil } diff --git a/pkg/system/compatibility/doc.go b/pkg/system/compatibility/doc.go new file mode 100644 index 0000000000..fbec0a7e3a --- /dev/null +++ b/pkg/system/compatibility/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package compatibility provides a layer to cross-compile with other OS than Linux +package compatibility diff --git a/pkg/system/compatibility/unix.go b/pkg/system/compatibility/unix.go index 9b17bf5af5..d860d250d5 100644 --- a/pkg/system/compatibility/unix.go +++ b/pkg/system/compatibility/unix.go @@ -2,7 +2,8 @@ // +build linux /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,6 +16,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package compatibility provides a layer to cross-compile with other OS than Linux diff --git a/pkg/system/compatibility/windows.go b/pkg/system/compatibility/windows.go index af7334301d..9077194c77 100644 --- a/pkg/system/compatibility/windows.go +++ b/pkg/system/compatibility/windows.go @@ -2,7 +2,8 @@ // +build windows /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,6 +16,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package compatibility provides a layer to cross-compile with other OS than Linux diff --git a/pkg/system/suite_test.go b/pkg/system/suite_test.go index e68e62963b..a1806b574a 100644 --- a/pkg/system/suite_test.go +++ b/pkg/system/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package system diff --git a/pkg/system/system.go b/pkg/system/system.go index dcc80e1a62..a6371ff2aa 100644 --- a/pkg/system/system.go +++ b/pkg/system/system.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package system provides an interface with the operating system diff --git a/pkg/system/system_test.go b/pkg/system/system_test.go index e5682a789c..0feb9a27c1 100644 --- a/pkg/system/system_test.go +++ b/pkg/system/system_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package system diff --git a/pkg/utils/conditions.go b/pkg/utils/conditions.go index fa1afcac11..918b97423c 100644 --- a/pkg/utils/conditions.go +++ b/pkg/utils/conditions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/conditions_test.go b/pkg/utils/conditions_test.go index edb1ed9780..b03e92f5d2 100644 --- a/pkg/utils/conditions_test.go +++ b/pkg/utils/conditions_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/context.go b/pkg/utils/context/context.go similarity index 75% rename from pkg/utils/context.go rename to pkg/utils/context/context.go index e91aebab61..a1915a0b12 100644 --- a/pkg/utils/context.go +++ b/pkg/utils/context/context.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ -package utils +package context // contextKey a type used to assign values inside the context type contextKey string @@ -24,3 +27,6 @@ const ContextKeyCluster contextKey = "cluster" // PluginClientKey is the context key holding cluster data const PluginClientKey contextKey = "pluginClient" + +// GRPCTimeoutKey is the context key holding the gRPC timeout +const GRPCTimeoutKey contextKey = "grpcTimeout" diff --git a/pkg/utils/context/doc.go b/pkg/utils/context/doc.go new file mode 100644 index 0000000000..b8a7757ef7 --- /dev/null +++ b/pkg/utils/context/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package context contains utility functions to work with context.Context +package context diff --git a/pkg/utils/discovery.go b/pkg/utils/discovery.go index b0479db081..2c9a602144 100644 --- a/pkg/utils/discovery.go +++ b/pkg/utils/discovery.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -225,7 +228,7 @@ func detectAvailableArchitectures(filepathGlob string) error { // DetectAvailableArchitectures detects the architectures available in the cluster func DetectAvailableArchitectures() error { - return detectAvailableArchitectures("bin/manager_*") + return detectAvailableArchitectures("operator/manager_*") } // DetectOLM looks for the operators.coreos.com operators resource in the current diff --git a/pkg/utils/discovery_test.go b/pkg/utils/discovery_test.go index 6df7bfc3dd..50a7535b2f 100644 --- a/pkg/utils/discovery_test.go +++ b/pkg/utils/discovery_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
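The `context` package above keeps its keys as an unexported named type rather than plain strings, so a key set by this package can never collide with a string key set elsewhere, even when the text matches. A small self-contained sketch of why that matters (the key name and timeout value are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// contextKey mirrors the typed-key pattern used by the context package.
type contextKey string

const grpcTimeoutKey contextKey = "grpcTimeout"

func main() {
	ctx := context.WithValue(context.Background(), grpcTimeoutKey, 5*time.Second)

	// The typed key retrieves the value...
	if timeout, ok := ctx.Value(grpcTimeoutKey).(time.Duration); ok {
		fmt.Println("timeout:", timeout)
	}

	// ...while a plain string with the same text does not match.
	fmt.Println(ctx.Value("grpcTimeout")) // <nil>: different key type
}
```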
+ +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -43,8 +46,13 @@ var _ = DescribeTable("Kubernetes minor version detection", ) var _ = Describe("Detect resources properly when", func() { - client := fakeClient.NewSimpleClientset() - fakeDiscovery := client.Discovery().(*discoveryFake.FakeDiscovery) + var client *fakeClient.Clientset + var fakeDiscovery *discoveryFake.FakeDiscovery + + BeforeEach(func() { + client = fakeClient.NewClientset() + fakeDiscovery = client.Discovery().(*discoveryFake.FakeDiscovery) + }) It("should not detect PodMonitor resource", func() { exists, err := PodMonitorExist(client.Discovery()) @@ -256,16 +264,14 @@ var _ = Describe("AvailableArchitecture", func() { }) It("should retrieve an existing available architecture", func() { - tempDir, err := os.MkdirTemp("", "test") - Expect(err).NotTo(HaveOccurred()) + tempDir := GinkgoT().TempDir() DeferCleanup(func() { - Expect(os.RemoveAll(tempDir)).To(Succeed()) availableArchitectures = nil }) // Create a sample file Expect(os.WriteFile(filepath.Join(tempDir, "manager_amd64"), []byte("amd64"), 0o600)).To(Succeed()) - err = detectAvailableArchitectures(filepath.Join(tempDir, "manager_*")) + err := detectAvailableArchitectures(filepath.Join(tempDir, "manager_*")) Expect(err).ToNot(HaveOccurred()) Expect(availableArchitectures).To(HaveLen(1)) diff --git a/pkg/utils/exec.go b/pkg/utils/exec.go index f9844f5365..57e88e7a90 100644 --- a/pkg/utils/exec.go +++ b/pkg/utils/exec.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package utils contains otherwise uncategorized kubernetes diff --git a/pkg/utils/fencing.go b/pkg/utils/fencing.go index 7cedb1cbf5..83ec99599c 100644 --- a/pkg/utils/fencing.go +++ b/pkg/utils/fencing.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
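The discovery test changes above swap a shared, package-level fake clientset for one rebuilt in `BeforeEach`, and replace manual `os.MkdirTemp` bookkeeping with `GinkgoT().TempDir()`. A hedged sketch of the resulting pattern, assuming an existing Ginkgo suite:

```go
package utils_test

import (
	"os"
	"path/filepath"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	fakeClient "k8s.io/client-go/kubernetes/fake"
)

var _ = Describe("architecture detection (sketch)", func() {
	var client *fakeClient.Clientset

	// A fresh clientset per spec: objects and reactors registered by one
	// It block can no longer leak into the next.
	BeforeEach(func() {
		client = fakeClient.NewClientset()
	})

	It("writes a manager binary into a throwaway directory", func() {
		// TempDir is removed by the framework after the spec, so no
		// DeferCleanup(os.RemoveAll) bookkeeping is needed.
		dir := GinkgoT().TempDir()
		Expect(os.WriteFile(filepath.Join(dir, "manager_amd64"), []byte("amd64"), 0o600)).To(Succeed())
		Expect(client).NotTo(BeNil())
	})
})
```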
+ +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -24,12 +27,11 @@ import ( "slices" "sort" + "github.com/cloudnative-pg/machinery/pkg/stringset" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) var ( @@ -193,7 +195,7 @@ func (fb *FencingMetadataExecutor) Execute(ctx context.Context, key types.Namesp if name != FenceAllInstances { var pod corev1.Pod if err := fb.cli.Get(ctx, client.ObjectKey{Namespace: key.Namespace, Name: name}, &pod); err != nil { - return fmt.Errorf("node %s not found in namespace %s", name, key.Namespace) + return fmt.Errorf("node %s not found in namespace %s: %w", name, key.Namespace, err) } } } diff --git a/pkg/utils/fencing_test.go b/pkg/utils/fencing_test.go index 27b1043cca..00563141a3 100644 --- a/pkg/utils/fencing_test.go +++ b/pkg/utils/fencing_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/finalizers.go b/pkg/utils/finalizers.go new file mode 100644 index 0000000000..a1f5da62b9 --- /dev/null +++ b/pkg/utils/finalizers.go @@ -0,0 +1,34 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package utils + +const ( + // DatabaseFinalizerName is the name of the finalizer + // triggering the deletion of the database + DatabaseFinalizerName = MetadataNamespace + "/deleteDatabase" + + // PublicationFinalizerName is the name of the finalizer + // triggering the deletion of the publication + PublicationFinalizerName = MetadataNamespace + "/deletePublication" + + // SubscriptionFinalizerName is the name of the finalizer + // triggering the deletion of the subscription + SubscriptionFinalizerName = MetadataNamespace + "/deleteSubscription" +) diff --git a/pkg/utils/hash/doc.go b/pkg/utils/hash/doc.go index f7f47923f9..76b99e7a53 100644 --- a/pkg/utils/hash/doc.go +++ b/pkg/utils/hash/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
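The new finalizers.go only defines the constants; wiring them into reconcilers is left to the callers. A hedged sketch of the expected usage with controller-runtime's `controllerutil` helpers (the `ensureFinalizer` wrapper is illustrative, not code from this patch):

```go
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)

// ensureFinalizer registers the database finalizer before any external
// state is created, so deletion cannot race the cleanup logic.
func ensureFinalizer(ctx context.Context, c client.Client, db client.Object) error {
	// AddFinalizer reports whether it mutated the object; only then is
	// an Update call needed.
	if controllerutil.AddFinalizer(db, utils.DatabaseFinalizerName) {
		return c.Update(ctx, db)
	}
	return nil
}
```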
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package hash allows the user to get a hash number for a given Kubernetes diff --git a/pkg/utils/hash/hash.go b/pkg/utils/hash/hash.go index e3f55d62fb..3f02a37310 100644 --- a/pkg/utils/hash/hash.go +++ b/pkg/utils/hash/hash.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hash diff --git a/pkg/utils/hash/hash_test.go b/pkg/utils/hash/hash_test.go index 1dddad3614..8c5e7fa323 100644 --- a/pkg/utils/hash/hash_test.go +++ b/pkg/utils/hash/hash_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hash diff --git a/pkg/utils/hash/suite_test.go b/pkg/utils/hash/suite_test.go index 74e1f06114..fec5faaacf 100644 --- a/pkg/utils/hash/suite_test.go +++ b/pkg/utils/hash/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hash diff --git a/pkg/utils/imagename.go b/pkg/utils/imagename.go deleted file mode 100644 index 0a9e04c4d2..0000000000 --- a/pkg/utils/imagename.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "fmt" - "regexp" - "strings" -) - -var ( - digestRegex = regexp.MustCompile(`@sha256:(?P[a-fA-F0-9]+)$`) - tagRegex = regexp.MustCompile(`:(?P[^/]+)$`) - hostRegex = regexp.MustCompile(`^[^./:]+((\.[^./:]+)+(:[0-9]+)?|:[0-9]+)/`) -) - -// Reference . -type Reference struct { - Name string - Tag string - Digest string -} - -// GetNormalizedName returns the normalized name of a reference -func (r *Reference) GetNormalizedName() (name string) { - name = r.Name - if r.Tag != "" { - name = fmt.Sprintf("%s:%s", name, r.Tag) - } - if r.Digest != "" { - name = fmt.Sprintf("%s@sha256:%s", name, r.Digest) - } - return name -} - -// NewReference parses the image name and returns an error if the name is invalid. -func NewReference(name string) *Reference { - reference := &Reference{} - - if !strings.Contains(name, "/") { - name = "docker.io/library/" + name - } else if !hostRegex.MatchString(name) { - name = "docker.io/" + name - } - - if digestRegex.MatchString(name) { - res := digestRegex.FindStringSubmatch(name) - reference.Digest = res[1] // digest capture group index - name = strings.TrimSuffix(name, res[0]) - } - - if tagRegex.MatchString(name) { - res := tagRegex.FindStringSubmatch(name) - reference.Tag = res[1] // tag capture group index - name = strings.TrimSuffix(name, res[0]) - } else if reference.Digest == "" { - reference.Tag = "latest" - } - - // everything else is the name - reference.Name = name - - return reference -} - -// GetImageTag gets the image tag from a full image string. -// Example: -// -// GetImageTag("postgres") == "latest" -// GetImageTag("ghcr.io/cloudnative-pg/postgresql:12.3") == "12.3" -func GetImageTag(imageName string) string { - ref := NewReference(imageName) - return ref.Tag -} diff --git a/pkg/utils/imagename_test.go b/pkg/utils/imagename_test.go deleted file mode 100644 index 15bf1bec38..0000000000 --- a/pkg/utils/imagename_test.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("image name management", func() { - It("should normalize image names", func() { - Expect(NewReference("postgres").GetNormalizedName()).To( - Equal("docker.io/library/postgres:latest")) - Expect(NewReference("myimage/postgres").GetNormalizedName()).To( - Equal("docker.io/myimage/postgres:latest")) - Expect(NewReference("localhost:5000/postgres").GetNormalizedName()).To( - Equal("localhost:5000/postgres:latest")) - Expect(NewReference("registry.localhost:5000/postgres:14.4").GetNormalizedName()).To( - Equal("registry.localhost:5000/postgres:14.4")) - Expect(NewReference("ghcr.io/cloudnative-pg/postgresql:34").GetNormalizedName()).To( - Equal("ghcr.io/cloudnative-pg/postgresql:34")) - }) - - It("should extract tag names", func() { - Expect(GetImageTag("postgres")).To(Equal("latest")) - Expect(GetImageTag("postgres:34.3")).To(Equal("34.3")) - Expect(GetImageTag("postgres:13@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866")). - To(Equal("13")) - }) - - It("should not extract a tag name", func() { - Expect(GetImageTag("postgres@sha256:cff94dd382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866")). - To(BeEmpty()) - }) -}) diff --git a/pkg/utils/job_conditions.go b/pkg/utils/job_conditions.go index ccbe148caf..fe1864a9d1 100644 --- a/pkg/utils/job_conditions.go +++ b/pkg/utils/job_conditions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/job_conditions_test.go b/pkg/utils/job_conditions_test.go index 27f91c0c18..590eb12ce2 100644 --- a/pkg/utils/job_conditions_test.go +++ b/pkg/utils/job_conditions_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index cc6a9e19ba..e89947cfdf 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,16 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
 
 import (
+	"fmt"
 	"reflect"
+	"strconv"
 	"strings"
 
 	corev1 "k8s.io/api/core/v1"
@@ -25,6 +30,10 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+// AlphaMetadataNamespace is the annotation and label namespace used by the alpha features of
+// the operator
+const AlphaMetadataNamespace = "alpha.cnpg.io"
+
 // MetadataNamespace is the annotation and label namespace used by the operator
 const MetadataNamespace = "cnpg.io"
 
@@ -71,10 +80,14 @@ const (
 	// scheduled backup if a backup is created by a scheduled backup
 	ParentScheduledBackupLabelName = MetadataNamespace + "/scheduled-backup"
 
-	// WatchedLabelName the name of the label which tell if a resource change will be automatically reloaded by instance
+	// WatchedLabelName is the name of the label which tells if a resource change will be automatically reloaded by the instance
 	// or not; used for Secrets or ConfigMaps
 	WatchedLabelName = MetadataNamespace + "/reload"
 
+	// UserTypeLabelName is the name of the label which tells if a Secret refers
+	// to a superuser database role or an application one
+	UserTypeLabelName = MetadataNamespace + "/userType"
+
 	// BackupTimelineLabelName is the name of the label where the timeline of a backup is kept
 	BackupTimelineLabelName = MetadataNamespace + "/backupTimeline"
 
@@ -96,6 +109,9 @@ const (
 	// PluginNameLabelName is the name of the label to be applied to services
 	// to have them detected as CNPG-i plugins
 	PluginNameLabelName = MetadataNamespace + "/pluginName"
+
+	// LivenessPingerAnnotationName is the name of the annotation holding the liveness pinger configuration
+	LivenessPingerAnnotationName = AlphaMetadataNamespace + "/livenessPinger"
 )
 
 const (
@@ -203,6 +219,10 @@ const (
 	// BackupTablespaceMapFileAnnotationName is the name of the annotation where the `tablespace_map` file is kept
 	BackupTablespaceMapFileAnnotationName = MetadataNamespace + "/backupTablespaceMapFile"
 
+	// BackupVolumeSnapshotDeadlineAnnotationName is the annotation for the snapshot backup failure deadline in minutes.
+	// It only applies to retryable snapshot errors
+	BackupVolumeSnapshotDeadlineAnnotationName = MetadataNamespace + "/volumeSnapshotDeadline"
+
 	// SnapshotStartTimeAnnotationName is the name of the annotation where a snapshot's start time is kept
 	SnapshotStartTimeAnnotationName = MetadataNamespace + "/snapshotStartTime"
 
@@ -229,6 +249,21 @@ const (
 	// PluginPortAnnotationName is the name of the annotation containing the
 	// port the plugin is listening to
 	PluginPortAnnotationName = MetadataNamespace + "/pluginPort"
+
+	// PodPatchAnnotationName is the name of the annotation containing the
+	// patch to apply to the pod
+	PodPatchAnnotationName = MetadataNamespace + "/podPatch"
+
+	// WebhookValidationAnnotationName is the name of the annotation describing whether
+	// the validation webhook should be enabled or disabled
+	WebhookValidationAnnotationName = MetadataNamespace + "/validation"
+
+	// FailoverQuorumAnnotationName is the name of the annotation that allows the
+	// user to enable synchronous quorum failover protection.
+	//
+	// This feature enables a quorum-based check before failover, ensuring
+	// no data loss at the expense of availability. 
+ FailoverQuorumAnnotationName = AlphaMetadataNamespace + "/failoverQuorum" ) type annotationStatus string @@ -273,6 +308,19 @@ const ( HibernationAnnotationValueOn HibernationAnnotationValue = "on" ) +// UserType tells if a secret refers to a superuser database role +// or an application one +type UserType string + +const ( + // UserTypeSuperuser is the type of a superuser database + // role + UserTypeSuperuser UserType = "superuser" + + // UserTypeApp is the type of an application role + UserTypeApp UserType = "app" +) + // LabelClusterName labels the object with the cluster name func LabelClusterName(object *metav1.ObjectMeta, name string) { if object.Labels == nil { @@ -479,3 +527,18 @@ func MergeObjectsMetadata(receiver client.Object, giver client.Object) { receiver.SetLabels(mergeMap(receiver.GetLabels(), giver.GetLabels())) receiver.SetAnnotations(mergeMap(receiver.GetAnnotations(), giver.GetAnnotations())) } + +// GetClusterSerialValue returns the `nodeSerial` value from the given annotation map or return an error +func GetClusterSerialValue(annotations map[string]string) (int, error) { + rawSerial, ok := annotations[ClusterSerialAnnotationName] + if !ok { + return 0, fmt.Errorf("no serial annotation found") + } + + serial, err := strconv.Atoi(rawSerial) + if err != nil { + return 0, fmt.Errorf("invalid serial annotation found: %w", err) + } + + return serial, nil +} diff --git a/pkg/utils/labels_annotations_test.go b/pkg/utils/labels_annotations_test.go index 2de3e3544f..d93796af5b 100644 --- a/pkg/utils/labels_annotations_test.go +++ b/pkg/utils/labels_annotations_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -152,7 +155,7 @@ var _ = Describe("Annotate pods management", func() { } AnnotateAppArmor(&pod.ObjectMeta, &pod.Spec, annotations) - _, isPresent := pod.ObjectMeta.Annotations[appArmorPostgres] + _, isPresent := pod.Annotations[appArmorPostgres] Expect(isPresent).To(BeFalse()) }) }) diff --git a/pkg/utils/logs/cluster_logs_test.go b/pkg/utils/logs/cluster_logs_test.go deleted file mode 100644 index 1622498168..0000000000 --- a/pkg/utils/logs/cluster_logs_test.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
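A short usage sketch for the new `GetClusterSerialValue` helper, which centralizes the `strconv.Atoi` handling and "annotation missing" error that callers used to repeat; the pod literal below is invented for illustration:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)

func main() {
	pod := corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Annotations: map[string]string{utils.ClusterSerialAnnotationName: "3"},
	}}

	// Returns a wrapped error when the annotation is missing or not an integer.
	serial, err := utils.GetClusterSerialValue(pod.GetAnnotations())
	fmt.Println(serial, err) // 3 <nil>
}
```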
-*/ - -package logs - -import ( - "bytes" - "context" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Cluster logging tests", func() { - clusterNamespace := "cluster-test" - clusterName := "myTestCluster" - cluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: clusterNamespace, - Name: clusterName, - }, - } - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: clusterNamespace, - Name: clusterName + "-1", - Labels: map[string]string{ - utils.ClusterLabelName: clusterName, - }, - }, - } - It("should exit on ended pod logs with the non-follow option", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer - var wait sync.WaitGroup - wait.Add(1) - go func() { - defer GinkgoRecover() - defer wait.Done() - streamClusterLogs := ClusterStreamingRequest{ - Cluster: cluster, - Options: &v1.PodLogOptions{ - Follow: false, - }, - Client: client, - } - err := streamClusterLogs.SingleStream(ctx, &logBuffer) - Expect(err).NotTo(HaveOccurred()) - }() - ctx.Done() - wait.Wait() - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) - - It("should catch extra logs if given the follow option", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer - // let's set a short follow-wait, and keep the cluster streaming for two - // cycles - followWaiting := 200 * time.Millisecond - ctx2, cancel := context.WithTimeout(ctx, 300*time.Millisecond) - go func() { - defer GinkgoRecover() - streamClusterLogs := ClusterStreamingRequest{ - Cluster: cluster, - Options: &v1.PodLogOptions{ - Follow: true, - }, - FollowWaiting: followWaiting, - Client: client, - } - err := streamClusterLogs.SingleStream(ctx2, &logBuffer) - Expect(err).NotTo(HaveOccurred()) - }() - // give the stream call time to do a new search for pods - time.Sleep(350 * time.Millisecond) - cancel() - // the fake pod will be seen twice - Expect(logBuffer.String()).To(BeEquivalentTo("fake logsfake logs")) - }) -}) diff --git a/pkg/utils/logs/logs.go b/pkg/utils/logs/logs.go deleted file mode 100644 index 8cf9aeda70..0000000000 --- a/pkg/utils/logs/logs.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package logs contains code to fetch logs from Kubernetes pods -package logs - -import ( - "bufio" - "context" - "fmt" - "io" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" -) - -// StreamingRequest represents a request to stream a pod's logs -type StreamingRequest struct { - Pod *v1.Pod - Options *v1.PodLogOptions - Previous bool `json:"previous,omitempty"` - // NOTE: the Client argument may be omitted, but it is good practice to pass it - // Importantly, it makes the logging functions testable - Client kubernetes.Interface -} - -func (spl *StreamingRequest) getPodName() string { - if spl.Pod != nil { - return spl.Pod.Name - } - return "" -} - -func (spl *StreamingRequest) getPodNamespace() string { - if spl.Pod != nil { - return spl.Pod.Namespace - } - return "" -} - -func (spl *StreamingRequest) getLogOptions() *v1.PodLogOptions { - if spl.Options == nil { - spl.Options = &v1.PodLogOptions{} - } - spl.Options.Previous = spl.Previous - return spl.Options -} - -func (spl *StreamingRequest) getKubernetesClient() kubernetes.Interface { - if spl.Client != nil { - return spl.Client - } - conf := ctrl.GetConfigOrDie() - - spl.Client = kubernetes.NewForConfigOrDie(conf) - - return spl.Client -} - -// getStreamToPod opens the REST request to the pod -func (spl *StreamingRequest) getStreamToPod() *rest.Request { - client := spl.getKubernetesClient() - pods := client.CoreV1().Pods(spl.getPodNamespace()) - - return pods.GetLogs( - spl.getPodName(), - spl.getLogOptions()) -} - -// Stream streams the pod logs and shunts them to the `writer`. -func (spl *StreamingRequest) Stream(ctx context.Context, writer io.Writer) (err error) { - wrapErr := func(err error) error { return fmt.Errorf("in Stream: %w", err) } - - logsRequest := spl.getStreamToPod() - logStream, err := logsRequest.Stream(ctx) - if err != nil { - return wrapErr(err) - } - defer func() { - innerErr := logStream.Close() - if err == nil && innerErr != nil { - err = innerErr - } - }() - - _, err = io.Copy(writer, logStream) - if err != nil { - err = wrapErr(err) - } - return err -} - -// TailPodLogs streams the pod logs starting from the current time, and keeps -// waiting for any new logs, until the context is cancelled by the calling process -// If `parseTimestamps` is true, the log line will have the timestamp in -// human-readable prepended. NOTE: this will make log-lines NON-JSON -func TailPodLogs( - ctx context.Context, - client kubernetes.Interface, - pod v1.Pod, - writer io.Writer, - parseTimestamps bool, -) error { - now := metav1.Now() - streamPodLog := StreamingRequest{ - Pod: &pod, - Options: &v1.PodLogOptions{ - Timestamps: parseTimestamps, - Follow: true, - SinceTime: &now, - }, - Client: client, - } - return streamPodLog.Stream(ctx, writer) -} - -// GetPodLogs streams the pod logs and shunts them to the `writer`, as well as -// returning the last `requestedLineLength` of lines of logs in a slice. -// If `getPrevious` was activated, it will get the previous logs -// -// TODO: this function is a bit hacky. The K8s PodLogOptions have a field -// called `TailLines` that seems to be just what we would like. 
-// HOWEVER: we want the full logs too, so we can write them to a file, in addition to -// the `TailLines` we want to pass along for display -func GetPodLogs( - ctx context.Context, - client kubernetes.Interface, - pod v1.Pod, - getPrevious bool, - writer io.Writer, - requestedLineLength int, -) ( - []string, error, -) { - wrapErr := func(err error) error { return fmt.Errorf("in GetPodLogs: %w", err) } - - streamPodLog := StreamingRequest{ - Pod: &pod, - Previous: getPrevious, - Options: &v1.PodLogOptions{}, - Client: client, - } - logsRequest := streamPodLog.getStreamToPod() - - logStream, err := logsRequest.Stream(ctx) - if err != nil { - return nil, wrapErr(err) - } - defer func() { - innerErr := logStream.Close() - if err == nil && innerErr != nil { - err = innerErr - } - }() - - rd := bufio.NewReader(logStream) - teedReader := io.TeeReader(rd, writer) - scanner := bufio.NewScanner(teedReader) - scanner.Buffer(make([]byte, 0, 4096), 1024*1024) - - if requestedLineLength <= 0 { - requestedLineLength = 10 - } - - // slice to hold the last `requestedLineLength` lines of log - lines := make([]string, requestedLineLength) - // index of the current line of the log (starting from zero) - i := 0 - // index in the slice that holds the current line of log - curIdx := 0 - - for scanner.Scan() { - lines[curIdx] = scanner.Text() - i++ - // `curIdx` walks from `0` to `requestedLineLength-1` and then to `0` in a cycle - curIdx = i % requestedLineLength - } - - if err := scanner.Err(); err != nil { - return nil, wrapErr(err) - } - // if `curIdx` walks to in the middle of 0 and `requestedLineLength-1`, assemble the last `requestedLineLength` - // lines of logs - if i > requestedLineLength && curIdx < (requestedLineLength-1) { - return append(lines[curIdx+1:], lines[:curIdx+1]...), nil - } - - return lines, nil -} diff --git a/pkg/utils/logs/logs_test.go b/pkg/utils/logs/logs_test.go deleted file mode 100644 index 8631c1c998..0000000000 --- a/pkg/utils/logs/logs_test.go +++ /dev/null @@ -1,148 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logs - -import ( - "bytes" - "context" - "sync" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Pod logging tests", func() { - podNamespace := "pod-test" - podName := "pod-name-test" - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: podNamespace, - Name: podName, - }, - } - - podLogOptions := &v1.PodLogOptions{} - - It("should return the proper podName", func() { - streamPodLog := StreamingRequest{ - Pod: pod, - Options: podLogOptions, - } - Expect(streamPodLog.getPodName()).To(BeEquivalentTo(podName)) - Expect(streamPodLog.getPodNamespace()).To(BeEquivalentTo(podNamespace)) - }) - - It("should be able to handle the nil Pod", func(ctx context.Context) { - // the nil pod passed will still default to the empty pod name - client := fake.NewSimpleClientset() - streamPodLog := StreamingRequest{ - Pod: nil, - Options: podLogOptions, - Client: client, - } - var logBuffer bytes.Buffer - err := streamPodLog.Stream(ctx, &logBuffer) - Expect(err).NotTo(HaveOccurred()) - // The fake Client will be given a pod name of "", but it will still - // go on along. In production, we'd have an error when pod not found - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - Expect(streamPodLog.getPodName()).To(BeEquivalentTo("")) - Expect(streamPodLog.getPodNamespace()).To(BeEquivalentTo("")) - }) - - It("previous option must be false by default", func() { - streamPodLog := StreamingRequest{ - Pod: pod, - Options: podLogOptions, - } - Expect(streamPodLog.getLogOptions().Previous).To(BeFalse()) - }) - - It("getLogOptions respects the Previous field setting", func() { - streamPodLog := StreamingRequest{ - Pod: pod, - Options: podLogOptions, - } - options := streamPodLog.getLogOptions() - Expect(options.Previous).To(BeFalse()) - - streamPodLog.Previous = true - options = streamPodLog.getLogOptions() - Expect(options.Previous).To(BeTrue()) - }) - - It("should read the logs with the provided k8s Client", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) - streamPodLog := StreamingRequest{ - Pod: pod, - Options: podLogOptions, - Previous: false, - Client: client, - } - - var logBuffer bytes.Buffer - err := streamPodLog.Stream(ctx, &logBuffer) - Expect(err).ToNot(HaveOccurred()) - - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) - - It("GetPodLogs correctly streams and provides output lines", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer - lines, err := GetPodLogs(ctx, client, *pod, false, &logBuffer, 2) - Expect(err).ToNot(HaveOccurred()) - Expect(lines).To(HaveLen(2)) - Expect(lines[0]).To(BeEquivalentTo("fake logs")) - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) - - It("GetPodLogs defaults to non-zero lines shown if set to zero", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer - lines, err := GetPodLogs(ctx, client, *pod, false, &logBuffer, 0) - Expect(err).ToNot(HaveOccurred()) - Expect(lines).To(HaveLen(10)) - Expect(lines[0]).To(BeEquivalentTo("fake logs")) - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) - - It("can follow pod logs", func() { - client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer - ctx := context.TODO() - var wait sync.WaitGroup - wait.Add(1) - go func() { - defer GinkgoRecover() - defer wait.Done() - err := TailPodLogs(ctx, client, *pod, &logBuffer, true) - Expect(err).NotTo(HaveOccurred()) - }() - // calling ctx.Done is not strictly necessary because the fake Client - // will terminate the pod stream anyway, ending TailPodLogs. 
- // But in "production", TailPodLogs will follow - // the pod logs until the context, or the logs, are over - ctx.Done() - wait.Wait() - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) -}) diff --git a/pkg/utils/math.go b/pkg/utils/math.go index 7a98f3d02c..03b13ece60 100644 --- a/pkg/utils/math.go +++ b/pkg/utils/math.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/operations.go b/pkg/utils/operations.go index 78ae73afb8..13083e7739 100644 --- a/pkg/utils/operations.go +++ b/pkg/utils/operations.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/operations_test.go b/pkg/utils/operations_test.go index f619946664..fd26743532 100644 --- a/pkg/utils/operations_test.go +++ b/pkg/utils/operations_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/ownership.go b/pkg/utils/ownership.go index 277a875949..dee78cd514 100644 --- a/pkg/utils/ownership.go +++ b/pkg/utils/ownership.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
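The deleted `logs` package was largely plumbing around a single client-go primitive: `GetLogs` returns a `*rest.Request` whose `Stream` yields an `io.ReadCloser` that ends with the pod's logs, or, with `Follow`, when the context is cancelled. A minimal sketch of that call (the function name and error handling are illustrative):

```go
package example

import (
	"context"
	"io"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// streamPodLogs copies a pod's log to w. With Follow set, Stream keeps
// the connection open until the context is cancelled or the pod ends.
func streamPodLogs(
	ctx context.Context,
	client kubernetes.Interface,
	namespace, name string,
	w io.Writer,
) error {
	logStream, err := client.CoreV1().
		Pods(namespace).
		GetLogs(name, &corev1.PodLogOptions{Follow: false}).
		Stream(ctx)
	if err != nil {
		return err
	}
	defer logStream.Close()

	_, err = io.Copy(w, logStream)
	return err
}
```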
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/parser.go b/pkg/utils/parser.go
index 6cca7e804e..3cba855094 100644
--- a/pkg/utils/parser.go
+++ b/pkg/utils/parser.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
@@ -21,6 +24,7 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
+	"strconv"
 	"strings"
 
 	"github.com/cloudnative-pg/machinery/pkg/log"
@@ -31,31 +35,108 @@ import (
 type pgControlDataKey = string
 
 const (
-	// PgControlDataKeyLatestCheckpointTimelineID is the
+	// pgControlDataKeyLatestCheckpointTimelineID is the
 	// latest checkpoint's TimeLineID pg_controldata entry
-	PgControlDataKeyLatestCheckpointTimelineID pgControlDataKey = "Latest checkpoint's TimeLineID"
+	pgControlDataKeyLatestCheckpointTimelineID pgControlDataKey = "Latest checkpoint's TimeLineID"
 
-	// PgControlDataKeyREDOWALFile is the latest checkpoint's
+	// pgControlDataKeyREDOWALFile is the latest checkpoint's
 	// REDO WAL file pg_controldata entry
-	PgControlDataKeyREDOWALFile pgControlDataKey = "Latest checkpoint's REDO WAL file"
+	pgControlDataKeyREDOWALFile pgControlDataKey = "Latest checkpoint's REDO WAL file"
 
-	// PgControlDataKeyDatabaseSystemIdentifier is the database
+	// pgControlDataKeyDatabaseSystemIdentifier is the database
 	// system identifier pg_controldata entry
-	PgControlDataKeyDatabaseSystemIdentifier pgControlDataKey = "Database system identifier"
+	pgControlDataKeyDatabaseSystemIdentifier pgControlDataKey = "Database system identifier"
 
-	// PgControlDataKeyLatestCheckpointREDOLocation is the latest
+	// pgControlDataKeyLatestCheckpointREDOLocation is the latest
 	// checkpoint's REDO location pg_controldata entry
-	PgControlDataKeyLatestCheckpointREDOLocation pgControlDataKey = "Latest checkpoint's REDO location"
+	pgControlDataKeyLatestCheckpointREDOLocation pgControlDataKey = "Latest checkpoint's REDO location"
 
-	// PgControlDataKeyTimeOfLatestCheckpoint is the time
+	// pgControlDataKeyTimeOfLatestCheckpoint is the time
 	// of latest checkpoint pg_controldata entry
-	PgControlDataKeyTimeOfLatestCheckpoint pgControlDataKey = "Time of latest checkpoint"
+	pgControlDataKeyTimeOfLatestCheckpoint pgControlDataKey = "Time of latest checkpoint"
 
-	// PgControlDataDatabaseClusterStateKey is the status
+	// pgControlDataDatabaseClusterStateKey is the status
 	// of the latest primary that ran on this data directory. 
- PgControlDataDatabaseClusterStateKey pgControlDataKey = "Database cluster state" + pgControlDataDatabaseClusterStateKey pgControlDataKey = "Database cluster state" + + // pgControlDataDataPageChecksumVersion reports whether the checksums are enabled in the cluster + pgControlDataDataPageChecksumVersion pgControlDataKey = "Data page checksum version" + + // pgControlDataBytesPerWALSegment reports the size of the WAL segments + pgControlDataBytesPerWALSegment pgControlDataKey = "Bytes per WAL segment" ) +// PgControlData represents the parsed output of pg_controldata +type PgControlData map[pgControlDataKey]string + +// GetLatestCheckpointTimelineID returns the latest checkpoint's TimeLineID +func (p PgControlData) GetLatestCheckpointTimelineID() string { + return p[pgControlDataKeyLatestCheckpointTimelineID] +} + +// TryGetLatestCheckpointTimelineID returns the latest checkpoint's TimeLineID +func (p PgControlData) TryGetLatestCheckpointTimelineID() (string, bool) { + v, ok := p[pgControlDataKeyLatestCheckpointTimelineID] + return v, ok +} + +// GetREDOWALFile returns the latest checkpoint's REDO WAL file +func (p PgControlData) GetREDOWALFile() string { + return p[pgControlDataKeyREDOWALFile] +} + +// TryGetREDOWALFile returns the latest checkpoint's REDO WAL file +func (p PgControlData) TryGetREDOWALFile() (string, bool) { + v, ok := p[pgControlDataKeyREDOWALFile] + return v, ok +} + +// GetDatabaseSystemIdentifier returns the database system identifier +func (p PgControlData) GetDatabaseSystemIdentifier() string { + return p[pgControlDataKeyDatabaseSystemIdentifier] +} + +// GetLatestCheckpointREDOLocation returns the latest checkpoint's REDO location +func (p PgControlData) GetLatestCheckpointREDOLocation() string { + return p[pgControlDataKeyLatestCheckpointREDOLocation] +} + +// GetTimeOfLatestCheckpoint returns the time of latest checkpoint +func (p PgControlData) GetTimeOfLatestCheckpoint() string { + return p[pgControlDataKeyTimeOfLatestCheckpoint] +} + +// GetDatabaseClusterState returns the status of the latest primary that ran on this data directory +func (p PgControlData) GetDatabaseClusterState() string { + return p[pgControlDataDatabaseClusterStateKey] +} + +// GetDataPageChecksumVersion returns whether the checksums are enabled in the cluster +func (p PgControlData) GetDataPageChecksumVersion() (string, error) { + value, ok := p[pgControlDataDataPageChecksumVersion] + if !ok { + return "", fmt.Errorf("no '%s' section in pg_controldata output", pgControlDataDataPageChecksumVersion) + } + return value, nil +} + +// GetBytesPerWALSegment returns the size of the WAL segments +func (p PgControlData) GetBytesPerWALSegment() (int, error) { + value, ok := p[pgControlDataBytesPerWALSegment] + if !ok { + return 0, fmt.Errorf("no '%s' section in pg_controldata output", pgControlDataBytesPerWALSegment) + } + + walSegmentSize, err := strconv.Atoi(value) + if err != nil { + return 0, fmt.Errorf( + "wrong '%s' pg_controldata value (not an integer): '%s' %w", + pgControlDataBytesPerWALSegment, value, err) + } + + return walSegmentSize, nil +} + // PgDataState represents the "Database cluster state" field of pg_controldata type PgDataState string @@ -79,7 +160,7 @@ func (state PgDataState) IsShutdown(ctx context.Context) bool { } // ParsePgControldataOutput parses a pg_controldata output into a map of key-value pairs -func ParsePgControldataOutput(data string) map[string]string { +func ParsePgControldataOutput(data string) PgControlData { pairs := make(map[string]string) lines := 
strings.Split(data, "\n") for _, line := range lines { @@ -227,13 +308,13 @@ var ( ) // CreatePromotionToken translates a parsed pgControlData into a JSON token -func CreatePromotionToken(pgDataMap map[string]string) (string, error) { +func (p PgControlData) CreatePromotionToken() (string, error) { content := PgControldataTokenContent{ - LatestCheckpointTimelineID: pgDataMap[PgControlDataKeyLatestCheckpointTimelineID], - REDOWALFile: pgDataMap[PgControlDataKeyREDOWALFile], - DatabaseSystemIdentifier: pgDataMap[PgControlDataKeyDatabaseSystemIdentifier], - LatestCheckpointREDOLocation: pgDataMap[PgControlDataKeyLatestCheckpointREDOLocation], - TimeOfLatestCheckpoint: pgDataMap[PgControlDataKeyTimeOfLatestCheckpoint], + LatestCheckpointTimelineID: p.GetLatestCheckpointTimelineID(), + REDOWALFile: p.GetREDOWALFile(), + DatabaseSystemIdentifier: p.GetDatabaseSystemIdentifier(), + LatestCheckpointREDOLocation: p.GetLatestCheckpointREDOLocation(), + TimeOfLatestCheckpoint: p.GetTimeOfLatestCheckpoint(), OperatorVersion: versions.Info.Version, } diff --git a/pkg/utils/parser_test.go b/pkg/utils/parser_test.go index 8c2d5b1fe7..f550566ea9 100644 --- a/pkg/utils/parser_test.go +++ b/pkg/utils/parser_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -106,7 +109,7 @@ var _ = Describe("promotion token creation", func() { return err } - token, err := CreatePromotionToken(parsedControlData) + token, err := parsedControlData.CreatePromotionToken() Expect(err).ToNot(HaveOccurred()) Expect(token).ToNot(BeEmpty()) Expect(decodeBase64(token)).To(Succeed()) @@ -117,7 +120,7 @@ var _ = Describe("promotion token parser", func() { It("parses a newly generated promotion token", func() { parsedControlData := ParsePgControldataOutput(fakeControlData) - token, err := CreatePromotionToken(parsedControlData) + token, err := parsedControlData.CreatePromotionToken() Expect(err).ToNot(HaveOccurred()) tokenContent, err := ParsePgControldataToken(token) @@ -152,7 +155,7 @@ var _ = Describe("promotion token validation", func() { It("validates a newly generated promotion token", func() { parsedControlData := ParsePgControldataOutput(fakeControlData) - token, err := CreatePromotionToken(parsedControlData) + token, err := parsedControlData.CreatePromotionToken() Expect(err).ToNot(HaveOccurred()) tokenContent, err := ParsePgControldataToken(token) diff --git a/pkg/utils/pod_conditions.go b/pkg/utils/pod_conditions.go index caea69e2a8..dde8a688ac 100644 --- a/pkg/utils/pod_conditions.go +++ b/pkg/utils/pod_conditions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
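With the refactor above, `ParsePgControldataOutput` returns a `PgControlData` map with typed accessors instead of forcing callers to index raw keys. A hedged usage sketch, assuming the usual `key: value` layout of `pg_controldata` output (the sample lines are abridged):

```go
package main

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)

func main() {
	out := "Database cluster state:               in production\n" +
		"Bytes per WAL segment:                16777216\n" +
		"Latest checkpoint's TimeLineID:       1\n"

	data := utils.ParsePgControldataOutput(out)

	// Plain getters return the raw string value (empty when absent).
	fmt.Println(data.GetDatabaseClusterState())       // in production
	fmt.Println(data.GetLatestCheckpointTimelineID()) // 1

	// GetBytesPerWALSegment also validates that the entry is an integer.
	if size, err := data.GetBytesPerWALSegment(); err == nil {
		fmt.Println(size) // 16777216
	}
}
```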
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -23,20 +26,6 @@ import ( var utilsLog = log.WithName("utils") -// PodStatus represent the possible status of pods -type PodStatus string - -const ( - // PodHealthy means that a Pod is active and ready - PodHealthy = "healthy" - - // PodReplicating means that a Pod is still not ready but still active - PodReplicating = "replicating" - - // PodFailed means that a Pod will not be scheduled again (deleted or evicted) - PodFailed = "failed" -) - // IsPodReady check if a Pod is ready or not func IsPodReady(pod corev1.Pod) bool { for _, c := range pod.Status.Conditions { @@ -117,24 +106,3 @@ func CountReadyPods(podList []corev1.Pod) int { } return readyPods } - -// ListStatusPods return a list of active Pods -func ListStatusPods(podList []corev1.Pod) map[PodStatus][]string { - podsNames := make(map[PodStatus][]string) - - for _, pod := range podList { - if !pod.DeletionTimestamp.IsZero() { - continue - } - switch { - case IsPodReady(pod): - podsNames[PodHealthy] = append(podsNames[PodHealthy], pod.Name) - case IsPodActive(pod): - podsNames[PodReplicating] = append(podsNames[PodReplicating], pod.Name) - default: - podsNames[PodFailed] = append(podsNames[PodFailed], pod.Name) - } - } - - return podsNames -} diff --git a/pkg/utils/pod_conditions_test.go b/pkg/utils/pod_conditions_test.go index c63c362242..185dfc9179 100644 --- a/pkg/utils/pod_conditions_test.go +++ b/pkg/utils/pod_conditions_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package utils @@ -162,106 +165,4 @@ var _ = Describe("Pod conditions test suite", func() { } Expect(IsPodUnschedulable(pod)).To(BeFalse()) }) - - Describe("Properly builds ListStatusPods", func() { - healthyPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "healthyPod", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionTrue, - }, - }, - }, - } - activePod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "activePod", - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionFalse, - }, - }, - }, - } - failedPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "failedPod", - }, - Status: corev1.PodStatus{ - Phase: corev1.PodFailed, - Conditions: []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionFalse, - }, - }, - }, - } - - now := metav1.Now() - terminatingPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminatingPod", - DeletionTimestamp: &now, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionTrue, - }, - }, - }, - } - - It("Detects healthy pods", func() { - podList := []corev1.Pod{healthyPod, healthyPod} - expectedStatus := map[PodStatus][]string{ - PodHealthy: {"healthyPod", "healthyPod"}, - } - podStatus := ListStatusPods(podList) - Expect(podStatus).To(BeEquivalentTo(expectedStatus)) - }) - - It("Detects active pods", func() { - podList := []corev1.Pod{healthyPod, activePod} - expectedStatus := map[PodStatus][]string{ - PodHealthy: {"healthyPod"}, - PodReplicating: {"activePod"}, - } - podStatus := ListStatusPods(podList) - Expect(podStatus).To(BeEquivalentTo(expectedStatus)) - }) - - It("Detects failed pods", func() { - podList := []corev1.Pod{healthyPod, activePod, failedPod} - expectedStatus := map[PodStatus][]string{ - PodHealthy: {"healthyPod"}, - PodReplicating: {"activePod"}, - PodFailed: {"failedPod"}, - } - podStatus := ListStatusPods(podList) - Expect(podStatus).To(BeEquivalentTo(expectedStatus)) - }) - - It("Excludes terminating pods", func() { - podList := []corev1.Pod{healthyPod, activePod, failedPod, terminatingPod} - expectedStatus := map[PodStatus][]string{ - PodHealthy: {"healthyPod"}, - PodReplicating: {"activePod"}, - PodFailed: {"failedPod"}, - } - podStatus := ListStatusPods(podList) - Expect(podStatus).To(BeEquivalentTo(expectedStatus)) - }) - }) }) diff --git a/pkg/utils/reconciliation.go b/pkg/utils/reconciliation.go index 2d0798a287..13aca8c5e5 100644 --- a/pkg/utils/reconciliation.go +++ b/pkg/utils/reconciliation.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/suite_test.go b/pkg/utils/suite_test.go index 768193fc2f..e6852d10fc 100644 --- a/pkg/utils/suite_test.go +++ b/pkg/utils/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/time.go b/pkg/utils/time.go deleted file mode 100644 index ee0cb79ab7..0000000000 --- a/pkg/utils/time.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ConvertToPostgresFormat converts timestamps to PostgreSQL time format, if needed. -// e.g. "2006-01-02T15:04:05Z07:00" --> "2006-01-02 15:04:05.000000Z07:00" -// If the conversion fails, the input timestamp is returned as it is. -func ConvertToPostgresFormat(timestamp string) string { - if t, err := time.Parse(metav1.RFC3339Micro, timestamp); err == nil { - return t.Format("2006-01-02 15:04:05.000000Z07:00") - } - - if t, err := time.Parse(time.RFC3339, timestamp); err == nil { - return t.Format("2006-01-02 15:04:05.000000Z07:00") - } - - return timestamp -} - -// GetCurrentTimestamp returns the current timestamp as a string in RFC3339Micro format -func GetCurrentTimestamp() string { - t := time.Now() - return t.Format(metav1.RFC3339Micro) -} - -// GetCurrentTimestampWithFormat returns the current timestamp as a string with the specified format -func GetCurrentTimestampWithFormat(format string) string { - t := time.Now() - return t.Format(format) -} - -// DifferenceBetweenTimestamps returns the time.Duration difference between two timestamps strings in time.RFC3339. -func DifferenceBetweenTimestamps(first, second string) (time.Duration, error) { - parsedTimestamp, err := time.Parse(metav1.RFC3339Micro, first) - if err != nil { - return 0, err - } - - parsedTimestampTwo, err := time.Parse(metav1.RFC3339Micro, second) - if err != nil { - return 0, err - } - - return parsedTimestamp.Sub(parsedTimestampTwo), nil -} - -// ToCompactISO8601 converts a time.Time into a compacted version of the ISO8601 timestamp, -// removing any separators for brevity. -// -// For example: -// -// Given: 2022-01-02 15:04:05 (UTC) -// Returns: 20220102150405 -// -// This compact format is useful for generating concise, yet human-readable timestamps that -// can serve as suffixes for backup-related objects or any other contexts where space or -// character count might be a concern. 
-func ToCompactISO8601(t time.Time) string { - return t.Format("20060102150405") -} diff --git a/pkg/utils/time_test.go b/pkg/utils/time_test.go deleted file mode 100644 index a46d6c3d7f..0000000000 --- a/pkg/utils/time_test.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Time conversion", func() { - It("properly works given a string in RFC3339 format", func() { - res := ConvertToPostgresFormat("2021-09-01T10:22:47+03:00") - Expect(res).To(BeEquivalentTo("2021-09-01 10:22:47.000000+03:00")) - }) - It("return same input string if not in RFC3339 format", func() { - res := ConvertToPostgresFormat("2001-09-29 01:02:03") - Expect(res).To(BeEquivalentTo("2001-09-29 01:02:03")) - }) -}) - -var _ = Describe("Parsing targetTime", func() { - It("should calculate correctly the difference between two timestamps", func() { - By("having the first time bigger than the second", func() { - time1 := "2022-07-06T13:11:09.000000Z" - time2 := "2022-07-06T13:11:07.000000Z" - expectedSecondDifference := float64(2) - difference, err := DifferenceBetweenTimestamps(time1, time2) - Expect(err).ToNot(HaveOccurred()) - Expect(difference.Seconds()).To(Equal(expectedSecondDifference)) - }) - By("having the first time smaller than the second", func() { - time1 := "2022-07-06T13:11:07.000000Z" - time2 := "2022-07-06T13:11:09.000000Z" - expectedSecondDifference := float64(-2) - difference, err := DifferenceBetweenTimestamps(time1, time2) - Expect(err).ToNot(HaveOccurred()) - Expect(difference.Seconds()).To(Equal(expectedSecondDifference)) - }) - By("having first or second time wrong", func() { - time1 := "2022-07-06T13:12:09.000000Z" - - _, err := DifferenceBetweenTimestamps(time1, "") - Expect(err).To(HaveOccurred()) - - _, err = DifferenceBetweenTimestamps("", time1) - Expect(err).To(HaveOccurred()) - }) - }) - - It("should be RFC3339Micro format", func() { - time1 := GetCurrentTimestamp() - - _, err := time.Parse(metav1.RFC3339Micro, time1) - Expect(err).ToNot(HaveOccurred()) - }) -}) - -var _ = Describe("ToCompactISO8601", func() { - It("should return a string in the expected format for a given time", func() { - testTime := time.Date(2022, 0o1, 0o2, 15, 0o4, 0o5, 0, time.UTC) - compactISO8601 := ToCompactISO8601(testTime) - Expect(compactISO8601).To(Equal("20220102150405")) - }) - - It("should return a string of length 14", func() { - testTime := time.Now() - compactISO8601 := ToCompactISO8601(testTime) - Expect(compactISO8601).To(HaveLen(14)) - }) -}) diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 50cea755ce..8247fdcd5d 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
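The helpers deleted from `pkg/utils/time.go` (and their tests, above) were thin wrappers over the standard library, so call sites can simply inline them. A sketch of equivalent stdlib code, with the format strings taken verbatim from the removed functions:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// GetCurrentTimestamp: current time in RFC3339Micro format
	now := time.Now().Format(metav1.RFC3339Micro)

	// ConvertToPostgresFormat: RFC3339(-Micro) -> PostgreSQL format,
	// falling back to the input string when parsing fails
	pgTime := now
	if t, err := time.Parse(metav1.RFC3339Micro, now); err == nil {
		pgTime = t.Format("2006-01-02 15:04:05.000000Z07:00")
	}

	// ToCompactISO8601: compact timestamp, e.g. a backup-object suffix
	compact := time.Now().UTC().Format("20060102150405")

	fmt.Println(now, pgTime, compact)
}
```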
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package versions contains the version of the CloudNativePG operator and the software @@ -20,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.24.0" + Version = "1.27.0" // DefaultImageName is the default image used by the operator to create pods - DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:16.4" + DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.24.0" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0" ) // BuildInfo is a struct containing all the info about the build @@ -36,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.24.0" + buildVersion = "1.27.0" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.24.1.yaml b/releases/cnpg-1.24.1.yaml new file mode 100644 index 0000000000..6f1fb1b4de --- /dev/null +++ b/releases/cnpg-1.24.1.yaml @@ -0,0 +1,16625 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: Backup is the Schema for the backups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. 
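For reference, the `Backup` spec fields defined in the schema above compose as in this minimal manifest sketch; the cluster name is illustrative:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-example
spec:
  cluster:
    name: pg-cluster          # required: the Cluster to back up
  method: volumeSnapshot      # barmanObjectStore (default) | volumeSnapshot | plugin
  online: true                # online/hot snapshot (the default)
  onlineConfiguration:
    immediateCheckpoint: true # complete the initial checkpoint as soon as possible
    waitForArchive: true      # wait for WAL archiving (the default)
  target: prefer-standby      # run on the most up-to-date standby, if available
```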
+ type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
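A minimal `ClusterImageCatalog` satisfying the schema above (cluster-scoped, 1 to 8 entries, `major` unique and at least 10); the image tags are illustrative:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ClusterImageCatalog
metadata:
  name: postgresql
spec:
  images:
    # the CEL rule on spec.images rejects duplicate major versions
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16.4
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17.5
```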
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See the Kubernetes
+ documentation for more information.
+ type: string
+ type: object
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
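
Read together, the scheduling fields above (`enablePodAntiAffinity`, `podAntiAffinityType`, `topologyKey`, `nodeSelector`, `tolerations`) are easier to follow in a concrete manifest. A minimal sketch of the `affinity` stanza with illustrative values only; the `workload` node label and the `dedicated` taint are assumptions, not defaults:

    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: cluster-example
    spec:
      instances: 3
      affinity:
        enablePodAntiAffinity: true
        # "required" guarantees spreading across topology domains, at the cost
        # of pods staying Pending when no suitable node exists
        podAntiAffinityType: required
        topologyKey: kubernetes.io/hostname
        nodeSelector:
          workload: postgres          # assumed node label
        tolerations:
          - key: dedicated            # assumed taint key
            operator: Equal
            value: postgres
            effect: NoSchedule
      storage:
        size: 1Gi

With "preferred" instead, the scheduler treats anti-affinity as a soft constraint and never blocks scheduling.
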
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to back up the data files
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
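
To make the `barmanObjectStore` fields above concrete, here is a minimal sketch of an object-store backup configuration targeting Azure Blob Storage; the Secret name `azure-creds`, its keys, and the storage account in `destinationPath` are illustrative assumptions:

    spec:
      backup:
        barmanObjectStore:
          destinationPath: https://myaccount.blob.core.windows.net/backups/
          azureCredentials:
            storageAccount:
              name: azure-creds        # assumed Secret holding the credentials
              key: AZURE_STORAGE_ACCOUNT
            storageKey:
              name: azure-creds
              key: AZURE_STORAGE_KEY
          data:
            compression: gzip
            jobs: 2
            immediateCheckpoint: true
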
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, the operator will presume that it's running inside a GKE environment;
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store.
Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are empty string, which will default to `prefer-standby` policy,
+ `primary` to have backups run always on primary instances, `prefer-standby`
+ to have backups run preferably on the most updated standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations are key-value pairs that will be added
+ to .metadata.annotations of snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels are key-value pairs that will be added
+ to .metadata.labels of snapshot resources.
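
A short sketch tying together the WAL archive and retention fields defined above; the bucket path is illustrative:

    spec:
      backup:
        retentionPolicy: "30d"        # XXu form: d(ays), w(eeks) or m(onths)
        target: prefer-standby        # or "primary"
        barmanObjectStore:
          destinationPath: s3://my-bucket/pg    # illustrative path
          wal:
            compression: gzip
            maxParallel: 4            # archive/restore up to 4 WAL files at once
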
+ type: object
+ online:
+ default: true
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ type: boolean
+ onlineConfiguration:
+ default:
+ immediateCheckpoint: false
+ waitForArchive: true
+ description: Configuration parameters to control the online/hot
+ backup with volume snapshots
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ snapshotOwnerReference:
+ default: none
+ description: SnapshotOwnerReference indicates the type of
+ owner reference the snapshot should have
+ enum:
+ - none
+ - cluster
+ - backup
+ type: string
+ tablespaceClassName:
+ additionalProperties:
+ type: string
+ description: |-
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+ Defaults to the PGDATA Snapshot Class, if set
+ type: object
+ walClassName:
+ description: WalClassName specifies the Snapshot Class to
+ be used for the PG_WAL PersistentVolumeClaim.
+ type: string
+ type: object
+ type: object
+ bootstrap:
+ description: Instructions to bootstrap this cluster
+ properties:
+ initdb:
+ description: Bootstrap the cluster via initdb
+ properties:
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default:`UTF8`)
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
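
The volume snapshot backup method described above can be summarised with a brief sketch; the `csi-snapclass` VolumeSnapshotClass name is an assumption:

    spec:
      backup:
        volumeSnapshot:
          className: csi-snapclass        # assumed VolumeSnapshotClass
          online: true                    # hot backup (default)
          onlineConfiguration:
            immediateCheckpoint: true
            waitForArchive: true
          snapshotOwnerReference: backup  # none | cluster | backup
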
+ type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. 
+ (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. 
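
Since the `initdb` bootstrap fields are spread across many properties, a compact sketch may help; the `bootstrap-sql` Secret and its `schema.sql` key are assumptions:

    spec:
      bootstrap:
        initdb:
          database: app
          owner: app
          dataChecksums: true
          walSegmentSize: 32              # MB, passed to initdb --wal-segsize
          postInitApplicationSQLRefs:
            secretRefs:
              - name: bootstrap-sql       # assumed Secret containing SQL files
                key: schema.sql

Per the ordering described above, any `secretRefs` are processed before any `configMapRefs`, each in the sequence listed.
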
+ type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. 
Currently, `VolumeSnapshot`
+ and `PersistentVolumeClaim` resources are supported, provided they
+ map an existing PVC group that is compatible with CloudNativePG
+ and was taken with a cold backup copy on a fenced Postgres
+ instance (a limitation that will be removed in the future, once
+ online backup support is implemented).
+ Mutually exclusive with `backup`.
+ properties:
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ tablespaceStorage:
+ additionalProperties:
+ description: |-
+ TypedLocalObjectReference contains enough information to let you locate the
+ typed referenced object inside the same namespace.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ description: Configuration of the storage for PostgreSQL
+ tablespaces
+ type: object
+ walStorage:
+ description: Configuration of the storage for PostgreSQL
+ WAL (Write-Ahead Log)
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - storage
+ type: object
+ type: object
+ type: object
+ certificates:
+ description: The configuration for the CA and related certificates
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; this can be omitted
+ if ReplicationTLSSecret is provided.
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server SSL certificates; this can be omitted
+ if ServerTLSSecret is provided.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must also provide `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ description:
+ description: Description of this PostgreSQL cluster
+ type: string
+ enablePDB:
+ default: true
+ description: |-
+ Manage the `PodDisruptionBudget` resources within the cluster. When
+ configured as `true` (default setting), the pod disruption budgets
+ will safeguard the primary node from being terminated. Conversely,
+ setting it to `false` will result in the absence of any
+ `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ hosting the PostgreSQL cluster. This latter configuration is
+ advisable for any PostgreSQL cluster employed for
+ development/staging purposes.
+ type: boolean
+ enableSuperuserAccess:
+ default: false
+ description: |-
+ When this option is enabled, the operator will use the `SuperuserSecret`
+ to update the `postgres` user password (if the secret is
+ not present, the operator will automatically create one). When this
+ option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+ it when automatically created, and then blank the password of the `postgres`
+ user by setting it to `NULL`. Disabled by default.
+ type: boolean
+ env:
+ description: |-
+ Env follows the Env format to pass environment variables
+ to the pods created in the cluster
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
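
Pulling the certificate-related secrets above into one place, here is a sketch of a fully user-managed TLS setup; all Secret names are assumptions:

    spec:
      enableSuperuserAccess: false
      certificates:
        serverCASecret: my-server-ca              # must hold ca.crt, and ca.key if no serverTLSSecret
        serverTLSSecret: my-server-tls            # kubernetes.io/tls Secret
        clientCASecret: my-client-ca
        replicationTLSSecret: my-replication-tls
        serverAltDNSNames:
          - cluster-example.example.com           # illustrative DNS name
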
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ ephemeralVolumesSizeLimit:
+ description: |-
+ EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+ volumes
+ properties:
+ shm:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Shm is the size limit of the shared memory volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ temporaryData:
+ anyOf:
+ - type: integer
+ - type: string
+ description: TemporaryData is the size limit of the temporary
+ data volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ externalClusters:
+ description: The list of external clusters which are used in the configuration
+ items:
+ description: |-
+ ExternalCluster represents the connection parameters to an
+ external cluster which is used in the other sections of the configuration
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to back up the data files
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
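
For the ephemeral volume knobs defined just above, a brief sketch; the `fast-ephemeral` StorageClass is an assumption, and since a custom `volumeClaimTemplate` already sizes the temporary data volume, only the `shm` limit is set alongside it:

    spec:
      ephemeralVolumesSizeLimit:
        shm: 256Mi
      ephemeralVolumeSource:
        volumeClaimTemplate:
          spec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: fast-ephemeral   # assumed StorageClass
            resources:
              requests:
                storage: 2Gi
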
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, the operator will presume that it's running inside a GKE environment;
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
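+          # Illustrative sketch (YAML comment only): an `externalClusters` entry
+          # whose `barmanObjectStore` matches the schema above. Bucket, secret
+          # names, and keys are hypothetical.
+          #
+          #   externalClusters:
+          #     - name: origin
+          #       barmanObjectStore:
+          #         destinationPath: s3://my-bucket/backups
+          #         s3Credentials:
+          #           accessKeyId:
+          #             name: aws-creds
+          #             key: ACCESS_KEY_ID
+          #           secretAccessKey:
+          #             name: aws-creds
+          #             key: ACCESS_SECRET_KEY
+          #         wal:
+          #           compression: gzip
+          #           maxParallel: 8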
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + x-kubernetes-validations: + - message: Major is immutable + rule: self == oldSelf + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). 
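+          # Illustrative sketch (YAML comment only): an external cluster reached
+          # through `connectionParameters` plus a `password` secret reference, per
+          # the schema above. Host and secret names are hypothetical.
+          #
+          #   externalClusters:
+          #     - name: legacy-pg
+          #       connectionParameters:
+          #         host: pg.example.com
+          #         user: streaming_replica
+          #         dbname: postgres
+          #       password:
+          #         name: legacy-pg-password
+          #         key: password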
+                format: int32
+                type: integer
+              logLevel:
+                default: info
+                description: 'The instances'' log level, one of the following values:
+                  error, warning, info (default), debug, trace'
+                enum:
+                - error
+                - warning
+                - info
+                - debug
+                - trace
+                type: string
+              managed:
+                description: The configuration that is used by the portions of PostgreSQL
+                  that are managed by the instance manager
+                properties:
+                  roles:
+                    description: Database roles managed by the `Cluster`
+                    items:
+                      description: |-
+                        RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+                        with the additional field Ensure specifying whether to ensure the presence or
+                        absence of the role in the database
+
+                        The defaults of the CREATE ROLE command are applied
+                        Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+                      properties:
+                        bypassrls:
+                          description: |-
+                            Whether a role bypasses every row-level security (RLS) policy.
+                            Default is `false`.
+                          type: boolean
+                        comment:
+                          description: Description of the role
+                          type: string
+                        connectionLimit:
+                          default: -1
+                          description: |-
+                            If the role can log in, this specifies how many concurrent
+                            connections the role can make. `-1` (the default) means no limit.
+                          format: int64
+                          type: integer
+                        createdb:
+                          description: |-
+                            When set to `true`, the role being defined will be allowed to create
+                            new databases. Specifying `false` (default) will deny a role the
+                            ability to create databases.
+                          type: boolean
+                        createrole:
+                          description: |-
+                            Whether the role will be permitted to create, alter, drop, comment
+                            on, change the security label for, and grant or revoke membership in
+                            other roles. Default is `false`.
+                          type: boolean
+                        disablePassword:
+                          description: DisablePassword indicates that a role's password
+                            should be set to NULL in Postgres
+                          type: boolean
+                        ensure:
+                          default: present
+                          description: Ensure the role is `present` or `absent` -
+                            defaults to "present"
+                          enum:
+                          - present
+                          - absent
+                          type: string
+                        inRoles:
+                          description: |-
+                            List of one or more existing roles to which this role will be
+                            immediately added as a new member. Default empty.
+                          items:
+                            type: string
+                          type: array
+                        inherit:
+                          default: true
+                          description: |-
+                            Whether a role "inherits" the privileges of roles it is a member of.
+                            Default is `true`.
+                          type: boolean
+                        login:
+                          description: |-
+                            Whether the role is allowed to log in. A role having the `login`
+                            attribute can be thought of as a user. Roles without this attribute
+                            are useful for managing database privileges, but are not users in
+                            the usual sense of the word. Default is `false`.
+                          type: boolean
+                        name:
+                          description: Name of the role
+                          type: string
+                        passwordSecret:
+                          description: |-
+                            Secret containing the password of the role (if present)
+                            If null, the password will be ignored unless DisablePassword is set
+                          properties:
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                          - name
+                          type: object
+                        replication:
+                          description: |-
+                            Whether a role is a replication role. A role must have this
+                            attribute (or be a superuser) in order to be able to connect to the
+                            server in replication mode (physical or logical replication) and in
+                            order to be able to create or drop replication slots. A role having
+                            the `replication` attribute is a very highly privileged role, and
+                            should only be used on roles actually used for replication. Default
+                            is `false`.
+                          type: boolean
+                        superuser:
+                          description: |-
+                            Whether the role is a `superuser` who can override all access
+                            restrictions within the database - superuser status is dangerous and
+                            should be used only when really needed. You must yourself be a
+                            superuser to create a new superuser. Default is `false`.
+                          type: boolean
+                        validUntil:
+                          description: |-
+                            Date and time after which the role's password is no longer valid.
+                            When omitted, the password will never expire (default).
+                          format: date-time
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  services:
+                    description: Services roles managed by the `Cluster`
+                    properties:
+                      additional:
+                        description: Additional is a list of additional managed services
+                          specified by the user.
+                        items:
+                          description: |-
+                            ManagedService represents a specific service managed by the cluster.
+                            It includes the type of service and its associated template specification.
+                          properties:
+                            selectorType:
+                              allOf:
+                              - enum:
+                                - rw
+                                - r
+                                - ro
+                              - enum:
+                                - rw
+                                - r
+                                - ro
+                              description: |-
+                                SelectorType specifies the type of selectors that the service will have.
+                                Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+                              type: string
+                            serviceTemplate:
+                              description: ServiceTemplate is the template specification
+                                for the service.
+                              properties:
+                                metadata:
+                                  description: |-
+                                    Standard object's metadata.
+                                    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                                  properties:
+                                    annotations:
+                                      additionalProperties:
+                                        type: string
+                                      description: |-
+                                        Annotations is an unstructured key value map stored with a resource that may be
+                                        set by external tools to store and retrieve arbitrary metadata. They are not
+                                        queryable and should be preserved when modifying objects.
+                                        More info: http://kubernetes.io/docs/user-guide/annotations
+                                      type: object
+                                    labels:
+                                      additionalProperties:
+                                        type: string
+                                      description: |-
+                                        Map of string keys and values that can be used to organize and categorize
+                                        (scope and select) objects. May match selectors of replication controllers
+                                        and services.
+                                        More info: http://kubernetes.io/docs/user-guide/labels
+                                      type: object
+                                    name:
+                                      description: The name of the resource. Only
+                                        supported for certain types
+                                      type: string
+                                  type: object
+                                spec:
+                                  description: |-
+                                    Specification of the desired behavior of the service.
+                                    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                                  properties:
+                                    allocateLoadBalancerNodePorts:
+                                      description: |-
+                                        allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                                        allocated for services with type LoadBalancer. Default is "true". It
+                                        may be set to "false" if the cluster load-balancer does not rely on
+                                        NodePorts. If the caller requests specific NodePorts (by specifying a
+                                        value), those requests will be respected, regardless of this field.
+                                        This field may only be set for services with type LoadBalancer and will
+                                        be cleared if the type is changed to any other type.
+                                      type: boolean
+                                    clusterIP:
+                                      description: |-
+                                        clusterIP is the IP address of the service and is usually assigned
+                                        randomly. If an address is specified manually, is in-range (as per
+                                        system configuration), and is not in use, it will be allocated to the
+                                        service; otherwise creation of the service will fail. This field may not
+                                        be changed through updates unless the type field is also being changed
+                                        to ExternalName (which requires this field to be blank) or the type
+                                        field is being changed from ExternalName (in which case this field may
+                                        optionally be specified, as described above). Valid values are "None",
+                                        empty string (""), or a valid IP address. Setting this to "None" makes a
+                                        "headless service" (no virtual IP), which is useful when direct endpoint
+                                        connections are preferred and proxying is not required. Only applies to
+                                        types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                                        when creating a Service of type ExternalName, creation will fail. This
+                                        field will be wiped when updating a Service to type ExternalName.
+                                        More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                      type: string
+                                    clusterIPs:
+                                      description: |-
+                                        ClusterIPs is a list of IP addresses assigned to this service, and are
+                                        usually assigned randomly. If an address is specified manually, is
+                                        in-range (as per system configuration), and is not in use, it will be
+                                        allocated to the service; otherwise creation of the service will fail.
+                                        This field may not be changed through updates unless the type field is
+                                        also being changed to ExternalName (which requires this field to be
+                                        empty) or the type field is being changed from ExternalName (in which
+                                        case this field may optionally be specified, as described above). Valid
+                                        values are "None", empty string (""), or a valid IP address. Setting
+                                        this to "None" makes a "headless service" (no virtual IP), which is
+                                        useful when direct endpoint connections are preferred and proxying is
+                                        not required. Only applies to types ClusterIP, NodePort, and
+                                        LoadBalancer. If this field is specified when creating a Service of type
+                                        ExternalName, creation will fail. This field will be wiped when updating
+                                        a Service to type ExternalName. If this field is not specified, it will
+                                        be initialized from the clusterIP field. If this field is specified,
+                                        clients must ensure that clusterIPs[0] and clusterIP have the same
+                                        value.
+
+                                        This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                                        These IPs must correspond to the values of the ipFamilies field. Both
+                                        clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                                        More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    externalIPs:
+                                      description: |-
+                                        externalIPs is a list of IP addresses for which nodes in the cluster
+                                        will also accept traffic for this service. These IPs are not managed by
+                                        Kubernetes. The user is responsible for ensuring that traffic arrives
+                                        at a node with this IP. A common example is external load-balancers
+                                        that are not part of the Kubernetes system.
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    externalName:
+                                      description: |-
+                                        externalName is the external reference that discovery mechanisms will
+                                        return as an alias for this service (e.g. a DNS CNAME record). No
+                                        proxying will be involved. Must be a lowercase RFC-1123 hostname
+                                        (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+                                      type: string
+                                    externalTrafficPolicy:
+                                      description: |-
+                                        externalTrafficPolicy describes how nodes distribute service traffic they
+                                        receive on one of the Service's "externally-facing" addresses (NodePorts,
+                                        ExternalIPs, and LoadBalancer IPs).
If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. 
Services can be "SingleStack" (a single IP family),
+                                        "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                        a single IP family on single-stack clusters), or "RequireDualStack"
+                                        (two IP families on dual-stack configured clusters, otherwise fail). The
+                                        ipFamilies and clusterIPs fields depend on the value of this field. This
+                                        field will be wiped when updating a service to type ExternalName.
+                                      type: string
+                                    loadBalancerClass:
+                                      description: |-
+                                        loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                        If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                        e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                        This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                        balancer implementation is used, today this is typically done through the cloud provider integration,
+                                        but should apply for any default implementation. If set, it is assumed that a load balancer
+                                        implementation is watching for Services with a matching class. Any default load balancer
+                                        implementation (e.g. cloud providers) should ignore Services that set this field.
+                                        This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                        Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                      type: string
+                                    loadBalancerIP:
+                                      description: |-
+                                        Only applies to Service Type: LoadBalancer.
+                                        This feature depends on whether the underlying cloud-provider supports specifying
+                                        the loadBalancerIP when a load balancer is created.
+                                        This field will be ignored if the cloud-provider does not support the feature.
+                                        Deprecated: This field was under-specified and its meaning varies across implementations.
+                                        Using it is non-portable and it may not support dual-stack.
+                                        Users are encouraged to use implementation-specific annotations when available.
+                                      type: string
+                                    loadBalancerSourceRanges:
+                                      description: |-
+                                        If specified and supported by the platform, traffic through the cloud-provider
+                                        load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                                        cloud-provider does not support the feature.
+                                        More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    ports:
+                                      description: |-
+                                        The list of ports that are exposed by this service.
+                                        More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                      items:
+                                        description: ServicePort contains information
+                                          on service's port.
+                                        properties:
+                                          appProtocol:
+                                            description: |-
+                                              The application protocol for this port.
+                                              This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                                              This field follows standard Kubernetes label syntax.
+                                              Valid values are either:
+
+                                              * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                                              RFC-6335 and https://www.iana.org/assignments/service-names).
+ + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. 
If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. 
+ It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. 
Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? 
+                    type: boolean
+                  reusePVC:
+                    default: true
+                    description: |-
+                      Reuse the existing PVC (wait for the node to come
+                      up again) or not (recreate it elsewhere - when `instances` > 1)
+                    type: boolean
+                type: object
+              plugins:
+                description: |-
+                  The plugins configuration, containing
+                  any plugin to be loaded with the corresponding configuration
+                items:
+                  description: |-
+                    PluginConfiguration specifies a plugin that needs to be loaded for this
+                    cluster to be reconciled
+                  properties:
+                    name:
+                      description: Name is the plugin name
+                      type: string
+                    parameters:
+                      additionalProperties:
+                        type: string
+                      description: Parameters is the configuration of the plugin
+                      type: object
+                  required:
+                  - name
+                  type: object
+                type: array
+              postgresGID:
+                default: 26
+                description: The GID of the `postgres` user inside the image, defaults
+                  to `26`
+                format: int64
+                type: integer
+              postgresUID:
+                default: 26
+                description: The UID of the `postgres` user inside the image, defaults
+                  to `26`
+                format: int64
+                type: integer
+              postgresql:
+                description: Configuration of the PostgreSQL server
+                properties:
+                  enableAlterSystem:
+                    description: |-
+                      If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+                      on this CloudNativePG Cluster.
+                      This should only be used for debugging and troubleshooting.
+                      Defaults to false.
+                    type: boolean
+                  ldap:
+                    description: Options to specify LDAP configuration
+                    properties:
+                      bindAsAuth:
+                        description: Bind as authentication configuration
+                        properties:
+                          prefix:
+                            description: Prefix for the bind authentication option
+                            type: string
+                          suffix:
+                            description: Suffix for the bind authentication option
+                            type: string
+                        type: object
+                      bindSearchAuth:
+                        description: Bind+Search authentication configuration
+                        properties:
+                          baseDN:
+                            description: Root DN to begin the user search
+                            type: string
+                          bindDN:
+                            description: DN of the user to bind to the directory
+                            type: string
+                          bindPassword:
+                            description: Secret with the password for the user to
+                              bind to the directory
+                            properties:
+                              key:
+                                description: The key of the secret to select from. Must
+                                  be a valid secret key.
+                                type: string
+                              name:
+                                default: ""
+                                description: |-
+                                  Name of the referent.
+                                  This field is effectively required, but due to backwards compatibility is
+                                  allowed to be empty. Instances of this type with an empty value here are
+                                  almost certainly wrong.
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                type: string
+                              optional:
+                                description: Specify whether the Secret or its key
+                                  must be defined
+                                type: boolean
+                            required:
+                            - key
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          searchAttribute:
+                            description: Attribute to match against the username
+                            type: string
+                          searchFilter:
+                            description: Search filter to use when doing the search+bind
+                              authentication
+                            type: string
+                        type: object
+                      port:
+                        description: LDAP server port
+                        type: integer
+                      scheme:
+                        description: LDAP scheme to be used, possible options are
+                          `ldap` and `ldaps`
+                        enum:
+                        - ldap
+                        - ldaps
+                        type: string
+                      server:
+                        description: LDAP hostname or IP address
+                        type: string
+                      tls:
+                        description: Set to 'true' to enable LDAP over TLS.
'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. + type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). 
+ items: + type: string + type: array + required: + - method + - number + type: object + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. 
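+          # Illustrative sketch (YAML comment only): a supervised rolling update
+          # that promotes via switchover, using the `primaryUpdateMethod` and
+          # `primaryUpdateStrategy` enums defined earlier in this section.
+          #
+          #   spec:
+          #     primaryUpdateMethod: switchover
+          #     primaryUpdateStrategy: supervised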
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. 
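+          # Illustrative sketch (YAML comment only): projecting a ConfigMap key
+          # under the `/projected` base folder through `projectedVolumeTemplate`,
+          # per the schema above. Names and paths are hypothetical.
+          #
+          #   projectedVolumeTemplate:
+          #     sources:
+          #       - configMap:
+          #           name: app-settings
+          #           items:
+          #             - key: settings.json
+          #               path: settings.json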
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. 
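Rounding out the projected sources described above, the serviceAccountToken entry mounts a short-lived, audience-scoped token that the kubelet rotates automatically; `expirationSeconds` defaults to one hour and must be at least ten minutes. A hypothetical excerpt of the same template:

spec:
  projectedVolumeTemplate:
    sources:
    - serviceAccountToken:
        audience: vault            # hypothetical token recipient
        expirationSeconds: 3600    # default; minimum is 600
        path: tokens/vault-token   # relative to the volume mount point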
+ type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. + pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
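Taken together, the `replica` and `replicationSlots` stanzas above describe a secondary member of a distributed topology. A sketch of such a cluster; the names are hypothetical, and the externalClusters entry that `source` must point at is omitted for brevity:

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-dc-b               # hypothetical
spec:
  instances: 3
  storage:
    size: 10Gi
  replica:
    enabled: true
    primary: cluster-dc-a          # designated primary of the distributed cluster
    self: cluster-dc-b             # compared with `primary` to derive this cluster's role
    source: cluster-dc-a           # replication origin, defined under externalClusters
  replicationSlots:
    highAvailability:
      enabled: true
      slotPrefix: _cnpg_           # default prefix
    synchronizeReplicas:
      enabled: true
      excludePatterns:
      - "^manual_"                 # hypothetical: skip manually created slots
    updateInterval: 30             # seconds between slot status updates on standbys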
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. 
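The compute and scheduling settings above (resources, schedulerName, seccompProfile, serviceAccountTemplate) combine into a spec excerpt like the following; the values and the annotation are hypothetical:

spec:
  resources:
    requests:
      cpu: "1"
      memory: 2Gi
    limits:
      cpu: "2"
      memory: 2Gi                  # requests cannot exceed limits
  schedulerName: default-scheduler # any scheduler installed in the cluster
  seccompProfile:
    type: RuntimeDefault           # the default applied when unset
  serviceAccountTemplate:
    metadata:                      # metadata is the only required field
      annotations:
        example.com/owner: database-team   # hypothetical annotation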
+ Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
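The three timing knobs interact: `stopDelay` is the total graceful-shutdown budget, `smartShutdownTimeout` reserves part of it for the smart phase (the remainder goes to a fast shutdown), and `startDelay` drives the startup probe threshold. For example:

spec:
  startDelay: 300            # startup probe failure threshold = ceiling(300 / 10) = 30
  stopDelay: 600             # total time allowed for a graceful shutdown
  smartShutdownTimeout: 420  # leaves 600 - 420 = 180 seconds for a fast shutdown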
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). 
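A storage stanza exercising the fields above, together with an externally managed superuser secret and a tighter switchover budget, might look like this; the StorageClass and secret names are hypothetical:

spec:
  storage:
    size: 100Gi
    storageClass: fast-ssd            # hypothetical StorageClass
    resizeInUseVolumes: true          # default: size changes are reapplied to live PVCs
    pvcTemplate:
      accessModes:
      - ReadWriteOnce
  superuserSecret:
    name: cluster-example-superuser   # hypothetical pre-created secret
  switchoverDelay: 600                # down from the 3600-second default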
+ format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. 
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. 
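A hypothetical declaration of two tablespaces, one owned by an application role and one registered in `temp_tablespaces` for temporary objects:

spec:
  tablespaces:
  - name: analytics
    owner:
      name: app                  # hypothetical PostgreSQL role
    storage:
      size: 200Gi
      storageClass: bulk-hdd     # hypothetical
  - name: scratch
    temporary: true              # added to temp_tablespaces
    storage:
      size: 20Gi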
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. 
+ A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
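For example, to spread instances evenly across availability zones while selecting only this cluster's pods (assuming `cnpg.io/cluster` as the operator-applied pod label, with a hypothetical cluster name):

spec:
  topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: DoNotSchedule
    labelSelector:
      matchLabels:
        cnpg.io/cluster: cluster-example   # hypothetical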
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. 
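A common layout keeps WAL on a separate, faster volume next to the main data volume; a minimal sketch with a hypothetical StorageClass:

spec:
  storage:
    size: 100Gi                # PGDATA volume
  walStorage:
    size: 20Gi                 # dedicated pg_wal volume
    storageClass: fast-ssd     # hypothetical; WAL tends to benefit from faster media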
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + azurePVCUpdateEnabled: + description: AzurePVCUpdateEnabled shows if the PVC online upgrade + is enabled for this cluster + type: boolean + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+                        - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided,
+                        this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+                        - `ca.key`: key used to generate Server SSL certs; if ServerTLSSecret is provided,
+                        this can be omitted.
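The layout just described boils down to an Opaque Secret carrying `ca.crt` and, optionally, `ca.key`. A sketch with placeholder data, assuming the secret is referenced from the cluster's certificates configuration:

apiVersion: v1
kind: Secret
metadata:
  name: cluster-example-server-ca   # hypothetical
type: Opaque
data:
  ca.crt: LS0tLS1CRUdJTi4uLg==      # base64-encoded PEM CA certificate (placeholder)
  ca.key: LS0tLS1CRUdJTi4uLg==      # base64-encoded PEM key; may be omitted when serverTLSSecret is provided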
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler 
+                      items:
+                        type: string
+                      type: array
+                    status:
+                      description: Status contains the status reported by the plugin
+                        through the SetStatusInCluster interface
+                      type: string
+                    version:
+                      description: |-
+                        Version is the version of the plugin loaded by the
+                        latest reconciliation loop
+                      type: string
+                    walCapabilities:
+                      description: |-
+                        WALCapabilities are the list of capabilities of the
+                        plugin regarding the WAL management
+                      items:
+                        type: string
+                      type: array
+                  required:
+                  - name
+                  - version
+                  type: object
+                type: array
+              poolerIntegrations:
+                description: The integration needed by poolers referencing the cluster
+                properties:
+                  pgBouncerIntegration:
+                    description: PgBouncerIntegrationStatus encapsulates the needed
+                      integration for the pgbouncer poolers referencing the cluster
+                    properties:
+                      secrets:
+                        items:
+                          type: string
+                        type: array
+                    type: object
+                type: object
+              pvcCount:
+                description: How many PVCs have been created by this cluster
+                format: int32
+                type: integer
+              readService:
+                description: Current list of read pods
+                type: string
+              readyInstances:
+                description: The total number of ready instances in the cluster. It
+                  is equal to the number of ready instance pods.
+                type: integer
+              resizingPVC:
+                description: List of all the PVCs that have the ResizingPVC condition.
+                items:
+                  type: string
+                type: array
+              secretsResourceVersion:
+                description: |-
+                  The list of resource versions of the secrets
+                  managed by the operator. Every change here is done in the
+                  interest of the instance manager, which will refresh the
+                  secret data
+                properties:
+                  applicationSecretVersion:
+                    description: The resource version of the "app" user secret
+                    type: string
+                  barmanEndpointCA:
+                    description: The resource version of the Barman Endpoint CA if
+                      provided
+                    type: string
+                  caSecretVersion:
+                    description: Unused. Retained for compatibility with old versions.
+                    type: string
+                  clientCaSecretVersion:
+                    description: The resource version of the PostgreSQL client-side
+                      CA secret version
+                    type: string
+                  externalClusterSecretVersion:
+                    additionalProperties:
+                      type: string
+                    description: The resource versions of the external cluster secrets
+                    type: object
+                  managedRoleSecretVersion:
+                    additionalProperties:
+                      type: string
+                    description: The resource versions of the managed roles secrets
+                    type: object
+                  metrics:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      A map with the versions of all the secrets used to pass metrics.
+                      Map keys are the secret names, map values are the versions
+                    type: object
+                  replicationSecretVersion:
+                    description: The resource version of the "streaming_replica" user
+                      secret
+                    type: string
+                  serverCaSecretVersion:
+                    description: The resource version of the PostgreSQL server-side
+                      CA secret version
+                    type: string
+                  serverSecretVersion:
+                    description: The resource version of the PostgreSQL server-side
+                      secret version
+                    type: string
+                  superuserSecretVersion:
+                    description: The resource version of the "postgres" user secret
+                    type: string
+                type: object
+              switchReplicaClusterStatus:
+                description: SwitchReplicaClusterStatus is the status of the switch
+                  to replica cluster
+                properties:
+                  inProgress:
+                    description: InProgress indicates if there is an ongoing procedure
+                      of switching a cluster to a replica cluster.
+                    type: boolean
+                type: object
+              tablespacesStatus:
+                description: TablespacesStatus reports the state of the declarative
+                  tablespaces in the cluster
+                items:
+                  description: TablespaceState represents the state of a tablespace
+                    in a cluster
+                  properties:
+                    error:
+                      description: Error is the reconciliation error, if any
+                      type: string
+                    name:
+                      description: Name is the name of the tablespace
+                      type: string
+                    owner:
+                      description: Owner is the PostgreSQL user owning the tablespace
+                      type: string
+                    state:
+                      description: State is the latest reconciliation state
+                      type: string
+                  required:
+                  - name
+                  - state
+                  type: object
+                type: array
+              targetPrimary:
+                description: |-
+                  Target primary instance, this is different from the previous one
+                  during a switchover or a failover
+                type: string
+              targetPrimaryTimestamp:
+                description: The timestamp when the last request for a new primary
+                  has occurred
+                type: string
+              timelineID:
+                description: The timeline of the Postgres cluster
+                type: integer
+              topology:
+                description: Instances topology.
+                properties:
+                  instances:
+                    additionalProperties:
+                      additionalProperties:
+                        type: string
+                      description: PodTopologyLabels represent the topology of a Pod.
+                        map[labelName]labelValue
+                      type: object
+                    description: Instances contains the pod topology of the instances
+                    type: object
+                  nodesUsed:
+                    description: |-
+                      NodesUsed represents the count of distinct nodes accommodating the instances.
+                      A value of '1' suggests that all instances are hosted on a single node,
+                      implying the absence of High Availability (HA). Ideally, this value should
+                      be the same as the number of instances in the Postgres HA cluster, implying
+                      shared nothing architecture on the compute side.
+                    format: int32
+                    type: integer
+                  successfullyExtracted:
+                    description: |-
+                      SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+                      in synchronous replica election in case of failures
+                    type: boolean
+                type: object
+              unusablePVC:
+                description: List of all the PVCs that are unusable because another
+                  PVC is missing
+                items:
+                  type: string
+                type: array
+              writeService:
+                description: Current write pod
+                type: string
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      scale:
+        specReplicasPath: .spec.instances
+        statusReplicasPath: .status.instances
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.16.4
+  name: imagecatalogs.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: ImageCatalog
+    listKind: ImageCatalogList
+    plural: imagecatalogs
+    singular: imagecatalog
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: ImageCatalog is the Schema for the imagecatalogs API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. + minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. 
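To make the ImageCatalog contract above concrete, a minimal valid manifest might look like the sketch below: between one and eight entries, each with a unique `major` of at least 10. The image tags shown are illustrative examples, not pinned to this release:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ImageCatalog
metadata:
  name: postgresql-catalog
spec:
  images:
  - major: 16
    image: ghcr.io/cloudnative-pg/postgresql:16.4   # tag is an example
  - major: 17
    image: ghcr.io/cloudnative-pg/postgresql:17.0   # tag is an example
```

A catalog that repeated a `major` value would be rejected by the CEL validation rule above (`self.all(e, self.filter(f, f.major==e.major).size() == 1)`).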
+ Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. 
+ items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. 
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  parameters:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Additional parameters to be passed to PgBouncer - please check
+                      the CNPG documentation for a list of options you can configure
+                    type: object
+                  paused:
+                    default: false
+                    description: |-
+                      When set to `true`, PgBouncer will disconnect from the PostgreSQL
+                      server, first waiting for all queries to complete, and pause all new
+                      client connections until this value is set to `false` (default). Internally,
+                      the operator calls PgBouncer's `PAUSE` and `RESUME` commands.
+                    type: boolean
+                  pg_hba:
+                    description: |-
+                      PostgreSQL Host Based Authentication rules (lines to be appended
+                      to the pg_hba.conf file)
+                    items:
+                      type: string
+                    type: array
+                  poolMode:
+                    default: session
+                    description: 'The pool mode. Default: `session`.'
+                    enum:
+                    - session
+                    - transaction
+                    type: string
+                type: object
+              serviceTemplate:
+                description: Template for the Service to be created
+                properties:
+                  metadata:
+                    description: |-
+                      Standard object's metadata.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Annotations is an unstructured key value map stored with a resource that may be
+                          set by external tools to store and retrieve arbitrary metadata. They are not
+                          queryable and should be preserved when modifying objects.
+                          More info: http://kubernetes.io/docs/user-guide/annotations
+                        type: object
+                      labels:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Map of string keys and values that can be used to organize and categorize
+                          (scope and select) objects. May match selectors of replication controllers
+                          and services.
+                          More info: http://kubernetes.io/docs/user-guide/labels
+                        type: object
+                      name:
+                        description: The name of the resource. Only supported for
+                          certain types
+                        type: string
+                    type: object
+                  spec:
+                    description: |-
+                      Specification of the desired behavior of the service.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                    properties:
+                      allocateLoadBalancerNodePorts:
+                        description: |-
+                          allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                          allocated for services with type LoadBalancer. Default is "true". It
+                          may be set to "false" if the cluster load-balancer does not rely on
+                          NodePorts. If the caller requests specific NodePorts (by specifying a
+                          value), those requests will be respected, regardless of this field.
+                          This field may only be set for services with type LoadBalancer and will
+                          be cleared if the type is changed to any other type.
+                        type: boolean
+                      clusterIP:
+                        description: |-
+                          clusterIP is the IP address of the service and is usually assigned
+                          randomly. If an address is specified manually, is in-range (as per
+                          system configuration), and is not in use, it will be allocated to the
+                          service; otherwise creation of the service will fail. This field may not
+                          be changed through updates unless the type field is also being changed
+                          to ExternalName (which requires this field to be blank) or the type
+                          field is being changed from ExternalName (in which case this field may
+                          optionally be specified, as described above). Valid values are "None",
+                          empty string (""), or a valid IP address. Setting this to "None" makes a
+                          "headless service" (no virtual IP), which is useful when direct endpoint
+                          connections are preferred and proxying is not required.
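Pulling the PgBouncer fields above together, a Pooler that enables transaction pooling and tunes a couple of parameters could look like this sketch. The cluster name, `type` value, and parameter settings are assumptions for illustration; consult the CNPG documentation for the full list of supported parameters:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw   # must not match any Cluster name in the namespace
spec:
  cluster:
    name: cluster-example
  instances: 3
  type: rw
  pgbouncer:
    poolMode: transaction    # default would be "session"
    parameters:              # map values are strings
      max_client_conn: "1000"
      default_pool_size: "10"
```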
+                          Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                          when creating a Service of type ExternalName, creation will fail. This
+                          field will be wiped when updating a Service to type ExternalName.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        type: string
+                      clusterIPs:
+                        description: |-
+                          ClusterIPs is a list of IP addresses assigned to this service, and are
+                          usually assigned randomly. If an address is specified manually, is
+                          in-range (as per system configuration), and is not in use, it will be
+                          allocated to the service; otherwise creation of the service will fail.
+                          This field may not be changed through updates unless the type field is
+                          also being changed to ExternalName (which requires this field to be
+                          empty) or the type field is being changed from ExternalName (in which
+                          case this field may optionally be specified, as described above). Valid
+                          values are "None", empty string (""), or a valid IP address. Setting
+                          this to "None" makes a "headless service" (no virtual IP), which is
+                          useful when direct endpoint connections are preferred and proxying is
+                          not required. Only applies to types ClusterIP, NodePort, and
+                          LoadBalancer. If this field is specified when creating a Service of type
+                          ExternalName, creation will fail. This field will be wiped when updating
+                          a Service to type ExternalName. If this field is not specified, it will
+                          be initialized from the clusterIP field. If this field is specified,
+                          clients must ensure that clusterIPs[0] and clusterIP have the same
+                          value.
+
+                          This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                          These IPs must correspond to the values of the ipFamilies field. Both
+                          clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      externalIPs:
+                        description: |-
+                          externalIPs is a list of IP addresses for which nodes in the cluster
+                          will also accept traffic for this service. These IPs are not managed by
+                          Kubernetes. The user is responsible for ensuring that traffic arrives
+                          at a node with this IP. A common example is external load-balancers
+                          that are not part of the Kubernetes system.
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      externalName:
+                        description: |-
+                          externalName is the external reference that discovery mechanisms will
+                          return as an alias for this service (e.g. a DNS CNAME record). No
+                          proxying will be involved. Must be a lowercase RFC-1123 hostname
+                          (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+                        type: string
+                      externalTrafficPolicy:
+                        description: |-
+                          externalTrafficPolicy describes how nodes distribute service traffic they
+                          receive on one of the Service's "externally-facing" addresses (NodePorts,
+                          ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+                          the service in a way that assumes that external load balancers will take care
+                          of balancing the service traffic between nodes, and so each node will deliver
+                          traffic only to the node-local endpoints of the service, without masquerading
+                          the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+                          be dropped.)
The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. 
+                          This field will be wiped when updating a service to type ExternalName.
+                        type: string
+                      loadBalancerClass:
+                        description: |-
+                          loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                          If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                          e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                          This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                          balancer implementation is used, today this is typically done through the cloud provider integration,
+                          but should apply for any default implementation. If set, it is assumed that a load balancer
+                          implementation is watching for Services with a matching class. Any default load balancer
+                          implementation (e.g. cloud providers) should ignore Services that set this field.
+                          This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                          Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                        type: string
+                      loadBalancerIP:
+                        description: |-
+                          Only applies to Service Type: LoadBalancer.
+                          This feature depends on whether the underlying cloud-provider supports specifying
+                          the loadBalancerIP when a load balancer is created.
+                          This field will be ignored if the cloud-provider does not support the feature.
+                          Deprecated: This field was under-specified and its meaning varies across implementations.
+                          Using it is non-portable and it may not support dual-stack.
+                          Users are encouraged to use implementation-specific annotations when available.
+                        type: string
+                      loadBalancerSourceRanges:
+                        description: |-
+                          If specified and supported by the platform, traffic through the cloud-provider
+                          load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                          cloud-provider does not support the feature.
+                          More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      ports:
+                        description: |-
+                          The list of ports that are exposed by this service.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        items:
+                          description: ServicePort contains information on service's
+                            port.
+                          properties:
+                            appProtocol:
+                              description: |-
+                                The application protocol for this port.
+                                This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                                This field follows standard Kubernetes label syntax.
+                                Valid values are either:
+
+                                * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                                RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                                * Kubernetes-defined prefixed names:
+                                  * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                                  * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                                  * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                                * Other protocols should use implementation-defined prefixed names such as
+                                mycompany.com/my-custom-protocol.
+                              type: string
+                            name:
+                              description: |-
+                                The name of this port within the service. This must be a DNS_LABEL.
+                                All ports within a ServiceSpec must have unique names.
When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. 
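The `serviceTemplate.spec` above embeds the standard Kubernetes ServiceSpec, so exposing a pooler outside the cluster reduces to ordinary Service configuration. For instance, a LoadBalancer Service preserving the client source IP might be requested as sketched here; the annotation key is a hypothetical, provider-specific placeholder:

```yaml
spec:
  serviceTemplate:
    metadata:
      annotations:
        example.com/internal-lb: "true"   # hypothetical provider annotation
    spec:
      type: LoadBalancer
      externalTrafficPolicy: Local        # preserves the client source IP
```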
+                          More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                        properties:
+                          activeDeadlineSeconds:
+                            description: |-
+                              Optional duration in seconds the pod may be active on the node relative to
+                              StartTime before the system will actively try to mark it failed and kill associated containers.
+                              Value must be a positive integer.
+                            format: int64
+                            type: integer
+                          affinity:
+                            description: If specified, the pod's scheduling constraints
+                            properties:
+                              nodeAffinity:
+                                description: Describes node affinity scheduling rules
+                                  for the pod.
+                                properties:
+                                  preferredDuringSchedulingIgnoredDuringExecution:
+                                    description: |-
+                                      The scheduler will prefer to schedule pods to nodes that satisfy
+                                      the affinity expressions specified by this field, but it may choose
+                                      a node that violates one or more of the expressions. The node that is
+                                      most preferred is the one with the greatest sum of weights, i.e.
+                                      for each node that meets all of the scheduling requirements (resource
+                                      request, requiredDuringScheduling affinity expressions, etc.),
+                                      compute a sum by iterating through the elements of this field and adding
+                                      "weight" to the sum if the node matches the corresponding matchExpressions; the
+                                      node(s) with the highest sum are the most preferred.
+                                    items:
+                                      description: |-
+                                        An empty preferred scheduling term matches all objects with implicit weight 0
+                                        (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                                      properties:
+                                        preference:
+                                          description: A node selector term, associated
+                                            with the corresponding weight.
+                                          properties:
+                                            matchExpressions:
+                                              description: A list of node selector requirements
+                                                by node's labels.
+                                              items:
+                                                description: |-
+                                                  A node selector requirement is a selector that contains values, a key, and an operator
+                                                  that relates the key and values.
+                                                properties:
+                                                  key:
+                                                    description: The label key that the
+                                                      selector applies to.
+                                                    type: string
+                                                  operator:
+                                                    description: |-
+                                                      Represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                    type: string
+                                                  values:
+                                                    description: |-
+                                                      An array of string values. If the operator is In or NotIn,
+                                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                      the values array must be empty. If the operator is Gt or Lt, the values
+                                                      array must have a single element, which will be interpreted as an integer.
+                                                      This array is replaced during a strategic merge patch.
+                                                    items:
+                                                      type: string
+                                                    type: array
+                                                    x-kubernetes-list-type: atomic
+                                                required:
+                                                - key
+                                                - operator
+                                                type: object
+                                              type: array
+                                              x-kubernetes-list-type: atomic
+                                            matchFields:
+                                              description: A list of node selector requirements
+                                                by node's fields.
+                                              items:
+                                                description: |-
+                                                  A node selector requirement is a selector that contains values, a key, and an operator
+                                                  that relates the key and values.
+                                                properties:
+                                                  key:
+                                                    description: The label key that the
+                                                      selector applies to.
+                                                    type: string
+                                                  operator:
+                                                    description: |-
+                                                      Represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                    type: string
+                                                  values:
+                                                    description: |-
+                                                      An array of string values. If the operator is In or NotIn,
+                                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                      the values array must be empty. If the operator is Gt or Lt, the values
+                                                      array must have a single element, which will be interpreted as an integer.
+                                                      This array is replaced during a strategic merge patch.
+                                                    items:
+                                                      type: string
+                                                    type: array
+                                                    x-kubernetes-list-type: atomic
+                                                required:
+                                                - key
+                                                - operator
+                                                type: object
+                                              type: array
+                                              x-kubernetes-list-type: atomic
+                                          type: object
+                                          x-kubernetes-map-type: atomic
+                                        weight:
+                                          description: Weight associated with matching
+                                            the corresponding nodeSelectorTerm, in the
+                                            range 1-100.
+                                          format: int32
+                                          type: integer
+                                      required:
+                                      - preference
+                                      - weight
+                                      type: object
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  requiredDuringSchedulingIgnoredDuringExecution:
+                                    description: |-
+                                      If the affinity requirements specified by this field are not met at
+                                      scheduling time, the pod will not be scheduled onto the node.
+                                      If the affinity requirements specified by this field cease to be met
+                                      at some point during pod execution (e.g. due to an update), the system
+                                      may or may not try to eventually evict the pod from its node.
+                                    properties:
+                                      nodeSelectorTerms:
+                                        description: Required. A list of node selector
+                                          terms. The terms are ORed.
+                                        items:
+                                          description: |-
+                                            A null or empty node selector term matches no objects. The requirements of
+                                            them are ANDed.
+                                            The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                                          properties:
+                                            matchExpressions:
+                                              description: A list of node selector requirements
+                                                by node's labels.
+                                              items:
+                                                description: |-
+                                                  A node selector requirement is a selector that contains values, a key, and an operator
+                                                  that relates the key and values.
+                                                properties:
+                                                  key:
+                                                    description: The label key that the
+                                                      selector applies to.
+                                                    type: string
+                                                  operator:
+                                                    description: |-
+                                                      Represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                    type: string
+                                                  values:
+                                                    description: |-
+                                                      An array of string values. If the operator is In or NotIn,
+                                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                      the values array must be empty. If the operator is Gt or Lt, the values
+                                                      array must have a single element, which will be interpreted as an integer.
+                                                      This array is replaced during a strategic merge patch.
+                                                    items:
+                                                      type: string
+                                                    type: array
+                                                    x-kubernetes-list-type: atomic
+                                                required:
+                                                - key
+                                                - operator
+                                                type: object
+                                              type: array
+                                              x-kubernetes-list-type: atomic
+                                            matchFields:
+                                              description: A list of node selector requirements
+                                                by node's fields.
+                                              items:
+                                                description: |-
+                                                  A node selector requirement is a selector that contains values, a key, and an operator
+                                                  that relates the key and values.
+                                                properties:
+                                                  key:
+                                                    description: The label key that the
+                                                      selector applies to.
+                                                    type: string
+                                                  operator:
+                                                    description: |-
+                                                      Represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                    type: string
+                                                  values:
+                                                    description: |-
+                                                      An array of string values. If the operator is In or NotIn,
+                                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                      the values array must be empty. If the operator is Gt or Lt, the values
+                                                      array must have a single element, which will be interpreted as an integer.
+                                                      This array is replaced during a strategic merge patch.
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. 
The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
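(Editorial aside, not part of the generated manifest: the matchLabelKeys/mismatchLabelKeys semantics documented above are easier to read against a concrete term. A minimal sketch of a required affinity term, assuming a hypothetical `app: example` label plus the `pod-template-hash` label that Deployments add automatically; per the description this needs the MatchLabelKeysInPodAffinity feature gate, enabled by default.)

    requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchLabels:
          app: example             # hypothetical label
      matchLabelKeys:
      - pod-template-hash          # scopes co-location to pods from the same ReplicaSet revision
      topologyKey: topology.kubernetes.io/zone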
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. 
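(Editorial aside, not part of the generated manifest: the affinity stanza that closes above is dense, so here is a minimal sketch of a preferred pod anti-affinity rule that spreads pods carrying a hypothetical `app: example` label across nodes.)

    affinity:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                app: example       # hypothetical label
            topologyKey: kubernetes.io/hostname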
+ Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
+ items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
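(Editorial aside, not part of the generated manifest: a minimal sketch of the postStart/preStop lifecycle hooks documented above. As the exec description notes, the command is exec'd rather than run in a shell, hence the explicit /bin/sh -c; the /shutdown path is a hypothetical endpoint.)

    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo started > /tmp/started"]
      preStop:
        httpGet:
          path: /shutdown          # hypothetical endpoint
          port: 8080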
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
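(Editorial aside, not part of the generated manifest: a minimal sketch of the livenessProbe fields spelled out above, using the documented defaults where sensible; /healthz is a hypothetical endpoint.)

    livenessProbe:
      httpGet:
        path: /healthz             # hypothetical endpoint
        port: 8080
      initialDelaySeconds: 10
      periodSeconds: 10            # documented default
      timeoutSeconds: 1            # documented default
      failureThreshold: 3          # documented default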
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
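(Editorial aside, not part of the generated manifest: a minimal sketch tying together the ports and readinessProbe fields above; note that a probe may address a port by its IANA_SVC_NAME rather than by number. /ready is a hypothetical endpoint.)

    ports:
    - name: http
      containerPort: 8080
      protocol: TCP
    readinessProbe:
      httpGet:
        path: /ready               # hypothetical endpoint
        port: http                 # refers to the named containerPort above
      periodSeconds: 5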
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. 
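(Editorial aside, not part of the generated manifest: a minimal sketch of the resources stanza above, followed by the "sidecar" pattern the restartPolicy description refers to. Names and image are hypothetical, and restartPolicy: Always is only valid on init containers, not in the containers list this schema describes.)

    resources:
      requests:
        cpu: 100m
        memory: 256Mi
      limits:
        memory: 256Mi

    initContainers:
    - name: log-shipper            # hypothetical sidecar
      image: example.com/shipper:1.0
      restartPolicy: Always        # keeps this init container running until regular containers terminate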
Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
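(Editorial aside, not part of the generated manifest: a minimal sketch of a restrictive container securityContext built from the fields documented above.)

    securityContext:
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      runAsNonRoot: true
      capabilities:
        drop: ["ALL"]
      seccompProfile:
        type: RuntimeDefault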
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
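(Editorial aside, not part of the generated manifest: a minimal sketch of the startupProbe described above; with failureThreshold: 30 and periodSeconds: 10, a slow-starting container gets up to 300 seconds before liveness and readiness probes take over. /healthz is a hypothetical endpoint.)

    startupProbe:
      httpGet:
        path: /healthz             # hypothetical endpoint
        port: 8080
      failureThreshold: 30
      periodSeconds: 10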
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. 
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. 
This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
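Since the container schema above mirrors the core/v1 API, the `envFrom` and `imagePullPolicy` fields behave exactly as they do for plain pods. A minimal sketch, assuming a hypothetical image and invented ConfigMap/Secret names (`app-settings`, `app-credentials`):

```yaml
containers:
  - name: app
    image: registry.example.com/app:1.2.3  # explicit non-latest tag, so pull policy defaults to IfNotPresent
    imagePullPolicy: IfNotPresent
    envFrom:
      - configMapRef:
          name: app-settings               # every key becomes an environment variable
      - prefix: SECRET_                    # keys are injected as SECRET_<key>
        secretRef:
          name: app-credentials
```

When the same key appears in several sources, the last source wins, and a plain `env` entry with that name takes precedence over all of them.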
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
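The lifecycle handlers defined above (the schema marks lifecycle as not allowed for ephemeral containers and `tcpSocket` as deprecated for hooks) combine naturally into a drain-before-shutdown pattern for regular containers. A hedged sketch with arbitrary command and timing values:

```yaml
lifecycle:
  postStart:
    exec:
      command: ["/bin/sh", "-c", "echo started > /tmp/started"]  # exec'd directly; a shell must be invoked explicitly
  preStop:
    sleep:
      seconds: 10  # keep serving while endpoints deregister; runs within the termination grace period
```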
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. 
Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. 
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
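The probe timing knobs documented above compose multiplicatively: after `initialDelaySeconds`, the probe runs every `periodSeconds` and is declared failed only after `failureThreshold` consecutive misses. For a regular container (the schema notes probes are not allowed for ephemeral containers), a readiness sketch with illustrative values:

```yaml
readinessProbe:
  tcpSocket:
    port: 5432              # a named port (IANA_SVC_NAME) would also be accepted
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 1
  successThreshold: 1       # must stay 1 for liveness and startup probes
  failureThreshold: 3       # ~30s of consecutive failures before the pod is marked unready
```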
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
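The `pattern` constraint on `limits` and `requests` above is the standard Kubernetes quantity grammar: plain integers, decimal suffixes such as `m` (milli), or binary suffixes such as `Mi`. A conforming sketch for a regular container (the schema notes ephemeral containers may not set resources):

```yaml
resources:
  requests:
    cpu: 100m        # 0.1 CPU
    memory: 256Mi
  limits:
    cpu: "1"         # quoted so YAML keeps it a string quantity
    memory: 512Mi    # requests may never exceed limits
```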
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
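Taken together, the container-level securityContext fields above express the usual restricted-profile hardening. An illustrative combination (not a statement of this operator's defaults):

```yaml
securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  runAsNonRoot: true          # the kubelet refuses to start the container as UID 0
  capabilities:
    drop: ["ALL"]             # shed every POSIX capability
  seccompProfile:
    type: RuntimeDefault      # the container runtime's default seccomp profile
```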
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
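Per the description above, a startup probe suppresses the other probes until it succeeds, so `failureThreshold × periodSeconds` becomes the effective initialization budget. A sketch for a slow-starting regular container (probes are not allowed for ephemeral containers, and the command here is hypothetical):

```yaml
startupProbe:
  exec:
    command: ["pg_isready", "-q"]  # exit status 0 is treated as healthy
  periodSeconds: 10
  failureThreshold: 30             # tolerate up to ~300s of startup before the container is restarted
```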
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. 
Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. 
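Each `hostAliases` entry above becomes a line in the pod's hosts file. A minimal sketch using a documentation-range IP and invented hostnames:

```yaml
hostAliases:
  - ip: 192.0.2.10        # RFC 5737 example address, purely illustrative
    hostnames:
      - backup.internal
      - backup
```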
+ type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
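# A minimal sketch of an env entry resolved through the downward API fieldRef
# described above (the variable name POD_NAME is illustrative):
#
#   env:
#     - name: POD_NAME
#       valueFrom:
#         fieldRef:
#           fieldPath: metadata.name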
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+                                        format: int64
+                                        type: integer
+                                    required:
+                                      - seconds
+                                    type: object
+                                  tcpSocket:
+                                    description: |-
+                                      Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+                                      for backward compatibility. There is no validation of this field, and
+                                      lifecycle hooks will fail at runtime when a tcp handler is specified.
+                                    properties:
+                                      host:
+                                        description: 'Optional: Host name to connect
+                                          to, defaults to the pod IP.'
+                                        type: string
+                                      port:
+                                        anyOf:
+                                          - type: integer
+                                          - type: string
+                                        description: |-
+                                          Number or name of the port to access on the container.
+                                          Number must be in the range 1 to 65535.
+                                          Name must be an IANA_SVC_NAME.
+                                        x-kubernetes-int-or-string: true
+                                    required:
+                                      - port
+                                    type: object
+                                type: object
+                              preStop:
+                                description: |-
+                                  PreStop is called immediately before a container is terminated due to an
+                                  API request or management event such as liveness/startup probe failure,
+                                  preemption, resource contention, etc. The handler is not called if the
+                                  container crashes or exits. The Pod's termination grace period countdown begins before the
+                                  PreStop hook is executed. Regardless of the outcome of the handler, the
+                                  container will eventually terminate within the Pod's termination grace
+                                  period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+                                  or until the termination grace period is reached.
+                                  More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+                                properties:
+                                  exec:
+                                    description: Exec specifies the action to take.
+                                    properties:
+                                      command:
+                                        description: |-
+                                          Command is the command line to execute inside the container, the working directory for the
+                                          command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+                                          not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+                                          a shell, you need to explicitly call out to that shell.
+                                          Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    type: object
+                                  httpGet:
+                                    description: HTTPGet specifies the http request
+                                      to perform.
+                                    properties:
+                                      host:
+                                        description: |-
+                                          Host name to connect to, defaults to the pod IP. You probably want to set
+                                          "Host" in httpHeaders instead.
+                                        type: string
+                                      httpHeaders:
+                                        description: Custom headers to set in the
+                                          request. HTTP allows repeated headers.
+                                        items:
+                                          description: HTTPHeader describes a custom
+                                            header to be used in HTTP probes
+                                          properties:
+                                            name:
+                                              description: |-
+                                                The header field name.
+                                                This will be canonicalized upon output, so case-variant names will be understood as the same header.
+                                              type: string
+                                            value:
+                                              description: The header field value
+                                              type: string
+                                          required:
+                                            - name
+                                            - value
+                                          type: object
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                      path:
+                                        description: Path to access on the HTTP
+                                          server.
+                                        type: string
+                                      port:
+                                        anyOf:
+                                          - type: integer
+                                          - type: string
+                                        description: |-
+                                          Name or number of the port to access on the container.
+                                          Number must be in the range 1 to 65535.
+                                          Name must be an IANA_SVC_NAME.
+                                        x-kubernetes-int-or-string: true
+                                      scheme:
+                                        description: |-
+                                          Scheme to use for connecting to the host.
+                                          Defaults to HTTP.
+                                        type: string
+                                    required:
+                                      - port
+                                    type: object
+                                  sleep:
+                                    description: Sleep represents the duration that
+                                      the container should sleep before being terminated.
+                                    properties:
+                                      seconds:
+                                        description: Seconds is the number of seconds
+                                          to sleep.
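# The postStart/preStop handlers above, sketched with illustrative values; the
# sleep action needs a Kubernetes release where PodLifecycleSleepAction is enabled:
#
#   lifecycle:
#     postStart:
#       exec:
#         command: ["/bin/sh", "-c", "echo started > /tmp/started"]
#     preStop:
#       sleep:
#         seconds: 10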
+                                        format: int64
+                                        type: integer
+                                    required:
+                                      - seconds
+                                    type: object
+                                  tcpSocket:
+                                    description: |-
+                                      Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+                                      for backward compatibility. There is no validation of this field, and
+                                      lifecycle hooks will fail at runtime when a tcp handler is specified.
+                                    properties:
+                                      host:
+                                        description: 'Optional: Host name to connect
+                                          to, defaults to the pod IP.'
+                                        type: string
+                                      port:
+                                        anyOf:
+                                          - type: integer
+                                          - type: string
+                                        description: |-
+                                          Number or name of the port to access on the container.
+                                          Number must be in the range 1 to 65535.
+                                          Name must be an IANA_SVC_NAME.
+                                        x-kubernetes-int-or-string: true
+                                    required:
+                                      - port
+                                    type: object
+                                type: object
+                            type: object
+                          livenessProbe:
+                            description: |-
+                              Periodic probe of container liveness.
+                              Container will be restarted if the probe fails.
+                              Cannot be updated.
+                              More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                            properties:
+                              exec:
+                                description: Exec specifies the action to take.
+                                properties:
+                                  command:
+                                    description: |-
+                                      Command is the command line to execute inside the container, the working directory for the
+                                      command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+                                      not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+                                      a shell, you need to explicitly call out to that shell.
+                                      Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                type: object
+                              failureThreshold:
+                                description: |-
+                                  Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                                  Defaults to 3. Minimum value is 1.
+                                format: int32
+                                type: integer
+                              grpc:
+                                description: GRPC specifies an action involving
+                                  a GRPC port.
+                                properties:
+                                  port:
+                                    description: Port number of the gRPC service.
+                                      Number must be in the range 1 to 65535.
+                                    format: int32
+                                    type: integer
+                                  service:
+                                    default: ""
+                                    description: |-
+                                      Service is the name of the service to place in the gRPC HealthCheckRequest
+                                      (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+                                      If this is not specified, the default behavior is defined by gRPC.
+                                    type: string
+                                required:
+                                  - port
+                                type: object
+                              httpGet:
+                                description: HTTPGet specifies the http request
+                                  to perform.
+                                properties:
+                                  host:
+                                    description: |-
+                                      Host name to connect to, defaults to the pod IP. You probably want to set
+                                      "Host" in httpHeaders instead.
+                                    type: string
+                                  httpHeaders:
+                                    description: Custom headers to set in the request.
+                                      HTTP allows repeated headers.
+                                    items:
+                                      description: HTTPHeader describes a custom
+                                        header to be used in HTTP probes
+                                      properties:
+                                        name:
+                                          description: |-
+                                            The header field name.
+                                            This will be canonicalized upon output, so case-variant names will be understood as the same header.
+                                          type: string
+                                        value:
+                                          description: The header field value
+                                          type: string
+                                      required:
+                                        - name
+                                        - value
+                                      type: object
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  path:
+                                    description: Path to access on the HTTP server.
+                                    type: string
+                                  port:
+                                    anyOf:
+                                      - type: integer
+                                      - type: string
+                                    description: |-
+                                      Name or number of the port to access on the container.
+                                      Number must be in the range 1 to 65535.
+                                      Name must be an IANA_SVC_NAME.
+                                    x-kubernetes-int-or-string: true
+                                  scheme:
+                                    description: |-
+                                      Scheme to use for connecting to the host.
+                                      Defaults to HTTP.
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
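# A livenessProbe combining the httpGet and threshold fields above (path and
# port are illustrative). With these values the kubelet restarts the container
# after 3 consecutive failures, i.e. roughly 30 seconds of failing checks:
#
#   livenessProbe:
#     httpGet:
#       path: /healthz
#       port: 8080
#     initialDelaySeconds: 10
#     periodSeconds: 10
#     failureThreshold: 3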
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
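# A readinessProbe using the tcpSocket action above; port 5432 (PostgreSQL) is
# illustrative. A failing check removes the pod from service endpoints without
# restarting the container:
#
#   readinessProbe:
#     tcpSocket:
#       port: 5432
#     periodSeconds: 10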
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
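# The "sidecar" behavior described under restartPolicy, sketched with an
# illustrative name and image; it requires a Kubernetes release where the
# SidecarContainers feature is enabled:
#
#   initContainers:
#     - name: log-shipper
#       image: example.com/log-shipper:latest   # hypothetical image
#       restartPolicy: Always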
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
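# A startupProbe sketch (path and port illustrative). failureThreshold 30 with
# periodSeconds 10 gives a slow-starting container up to 300 seconds before the
# liveness probe takes over:
#
#   startupProbe:
#     httpGet:
#       path: /healthz
#       port: 8080
#     failureThreshold: 30
#     periodSeconds: 10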
+                                format: int32
+                                type: integer
+                              tcpSocket:
+                                description: TCPSocket specifies an action involving
+                                  a TCP port.
+                                properties:
+                                  host:
+                                    description: 'Optional: Host name to connect
+                                      to, defaults to the pod IP.'
+                                    type: string
+                                  port:
+                                    anyOf:
+                                      - type: integer
+                                      - type: string
+                                    description: |-
+                                      Number or name of the port to access on the container.
+                                      Number must be in the range 1 to 65535.
+                                      Name must be an IANA_SVC_NAME.
+                                    x-kubernetes-int-or-string: true
+                                required:
+                                  - port
+                                type: object
+                              terminationGracePeriodSeconds:
+                                description: |-
+                                  Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                                  The grace period is the duration in seconds after the processes running in the pod are sent
+                                  a termination signal and the time when the processes are forcibly halted with a kill signal.
+                                  Set this value longer than the expected cleanup time for your process.
+                                  If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                                  value overrides the value provided by the pod spec.
+                                  Value must be non-negative integer. The value zero indicates stop immediately via
+                                  the kill signal (no opportunity to shut down).
+                                  This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                                  Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                                format: int64
+                                type: integer
+                              timeoutSeconds:
+                                description: |-
+                                  Number of seconds after which the probe times out.
+                                  Defaults to 1 second. Minimum value is 1.
+                                  More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                                format: int32
+                                type: integer
+                            type: object
+                          stdin:
+                            description: |-
+                              Whether this container should allocate a buffer for stdin in the container runtime. If this
+                              is not set, reads from stdin in the container will always result in EOF.
+                              Default is false.
+                            type: boolean
+                          stdinOnce:
+                            description: |-
+                              Whether the container runtime should close the stdin channel after it has been opened by
+                              a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                              sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                              first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                              at which time stdin is closed and remains closed until the container is restarted. If this
+                              flag is false, a container process that reads from stdin will never receive an EOF.
+                              Default is false.
+                            type: boolean
+                          terminationMessagePath:
+                            description: |-
+                              Optional: Path at which the file to which the container's termination message
+                              will be written is mounted into the container's filesystem.
+                              Message written is intended to be brief final status, such as an assertion failure message.
+                              Will be truncated by the node if greater than 4096 bytes. The total message length across
+                              all containers will be limited to 12kb.
+                              Defaults to /dev/termination-log.
+                              Cannot be updated.
+                            type: string
+                          terminationMessagePolicy:
+                            description: |-
+                              Indicate how the termination message should be populated. File will use the contents of
+                              terminationMessagePath to populate the container status message on both success and failure.
+                              FallbackToLogsOnError will use the last chunk of container log output if the termination
+                              message file is empty and the container exited with an error.
+                              The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                              Defaults to File.
+                              Cannot be updated.
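# The two termination-message fields above with the non-default policy, which
# falls back to the tail of the container log when the message file is empty:
#
#   terminationMessagePath: /dev/termination-log
#   terminationMessagePolicy: FallbackToLogsOnError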
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
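# A volumeMount using the readOnly/recursiveReadOnly combination described
# above (volume name and path illustrative); IfPossible only takes effect when
# the container runtime supports it:
#
#   volumeMounts:
#     - name: shared-data
#       mountPath: /data
#       readOnly: true
#       recursiveReadOnly: IfPossible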
+                            type: string
+                          required:
+                            - name
+                          type: object
+                        type: array
+                        x-kubernetes-list-map-keys:
+                          - name
+                        x-kubernetes-list-type: map
+                      nodeName:
+                        description: |-
+                          NodeName indicates in which node this pod is scheduled.
+                          If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+                          Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+                          This field should not be used to express a desire for the pod to be scheduled on a specific node.
+                          https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
+                        type: string
+                      nodeSelector:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          NodeSelector is a selector which must be true for the pod to fit on a node.
+                          Selector which must match a node's labels for the pod to be scheduled on that node.
+                          More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      os:
+                        description: |-
+                          Specifies the OS of the containers in the pod.
+                          Some pod and container fields are restricted if this is set.
+
+                          If the OS field is set to linux, the following fields must be unset:
+                          - securityContext.windowsOptions
+
+                          If the OS field is set to windows, the following fields must be unset:
+                          - spec.hostPID
+                          - spec.hostIPC
+                          - spec.hostUsers
+                          - spec.securityContext.appArmorProfile
+                          - spec.securityContext.seLinuxOptions
+                          - spec.securityContext.seccompProfile
+                          - spec.securityContext.fsGroup
+                          - spec.securityContext.fsGroupChangePolicy
+                          - spec.securityContext.sysctls
+                          - spec.shareProcessNamespace
+                          - spec.securityContext.runAsUser
+                          - spec.securityContext.runAsGroup
+                          - spec.securityContext.supplementalGroups
+                          - spec.securityContext.supplementalGroupsPolicy
+                          - spec.containers[*].securityContext.appArmorProfile
+                          - spec.containers[*].securityContext.seLinuxOptions
+                          - spec.containers[*].securityContext.seccompProfile
+                          - spec.containers[*].securityContext.capabilities
+                          - spec.containers[*].securityContext.readOnlyRootFilesystem
+                          - spec.containers[*].securityContext.privileged
+                          - spec.containers[*].securityContext.allowPrivilegeEscalation
+                          - spec.containers[*].securityContext.procMount
+                          - spec.containers[*].securityContext.runAsUser
+                          - spec.containers[*].securityContext.runAsGroup
+                        properties:
+                          name:
+                            description: |-
+                              Name is the name of the operating system. The currently supported values are linux and windows.
+                              Additional values may be defined in the future and can be one of:
+                              https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+                              Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+                            type: string
+                        required:
+                          - name
+                        type: object
+                      overhead:
+                        additionalProperties:
+                          anyOf:
+                            - type: integer
+                            - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+                          This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+                          the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+                          The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+                          set.
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. 
The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. 
The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
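For orientation while reading the schema: the pod-level securityContext fields above (fsGroup, runAsNonRoot, runAsUser, seccompProfile) are typically set together in a pod template. A minimal sketch, with illustrative values only (nothing here is a default of this CRD):

securityContext:
  runAsNonRoot: true        # kubelet refuses to start a container that would run as UID 0
  runAsUser: 26             # example UID; a container-level securityContext would override it
  fsGroup: 26               # supported volume types are made group-owned by this GID
  seccompProfile:
    type: RuntimeDefault    # use the container runtime's default seccomp profile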
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system.
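The tolerations schema above corresponds to entries like the following in a pod template. A hedged sketch; the taint key and value are illustrative:

tolerations:
- key: "dedicated"                          # example taint key
  operator: "Equal"
  value: "postgres"                         # example taint value
  effect: "NoSchedule"
- key: "node.kubernetes.io/unreachable"
  operator: "Exists"
  effect: "NoExecute"
  tolerationSeconds: 300                    # evict 5 minutes after the taint appears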
+ format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g.
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
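Putting the maxSkew/topologyKey/whenUnsatisfiable fields just described together, a topology spread constraint typically looks like this in a pod template. A minimal sketch; the app label is illustrative:

topologySpreadConstraints:
- maxSkew: 1                              # at most one pod of difference between zones
  topologyKey: topology.kubernetes.io/zone
  whenUnsatisfiable: DoNotSchedule        # hard constraint; ScheduleAnyway would be best-effort
  labelSelector:
    matchLabels:
      app: example                        # only pods matching this selector are counted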
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version.
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. 
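The emptyDir schema just described is one of the simplest volume sources to use. A minimal sketch of a memory-backed scratch volume; the name and size are illustrative:

volumes:
- name: scratch
  emptyDir:
    medium: Memory       # tmpfs; usage counts against the pod's memory limits
    sizeLimit: 256Mi     # evicted if the volume grows past this limit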
+ + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine.
This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. 
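Tying together the ephemeral volumeClaimTemplate fields described a few hunks above: the template is declared inline in the pod's volumes list, and the resulting PVC is named `<pod name>-<volume name>` and deleted with the pod. A minimal sketch; the storage class name is illustrative:

volumes:
- name: scratch-data
  ephemeral:
    volumeClaimTemplate:
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: standard      # example class; must support dynamic provisioning
        resources:
          requests:
            storage: 1Gi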
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false.
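The image volume source just described mounts an OCI image or artifact read-only into the pod. A minimal sketch, with a purely illustrative registry reference:

volumes:
- name: artifact
  image:
    reference: registry.example.com/org/artifact:v1   # example OCI reference
    pullPolicy: IfNotPresent                          # pull only if not already on the node

The container then mounts it like any other volume via volumeMounts; per the schema above, the mount is always ro/noexec and subPath mounts are not supported.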
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. 
+ + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
+ items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serviceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
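+ # Illustrative snippet (editorial comment, not emitted by controller-gen;
+ # "example-cluster" is a hypothetical name): a ScheduledBackup that should own
+ # the Backup objects it creates would set, in its spec:
+ #
+ #   spec:
+ #     cluster:
+ #       name: example-cluster
+ #     schedule: "0 0 0 * * *"
+ #     backupOwnerReference: self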
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: Whether the first backup has to start immediately after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: Whether this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup.
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule was checked + format: date-time + type: string + lastScheduleTime: + description: Information about the last time a backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - poolers + - scheduledbackups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - scheduledbackups/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup:
rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() 
= pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , 
maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files 
are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + 
usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + 
allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.25.0-rc1.yaml 
b/releases/cnpg-1.25.0-rc1.yaml new file mode 100644 index 0000000000..d69fecaf08 --- /dev/null +++ b/releases/cnpg-1.25.0-rc1.yaml @@ -0,0 +1,17645 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: Backup is the Schema for the backups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. 
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. 
Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + defaults to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to back up the data files + When not defined, base backup files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (e.g. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are key-value pairs that will be added + to the .metadata.annotations of snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to the .metadata.labels of snapshot resources.
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after it is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicitly provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
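+              # Sketch: an `initdb` bootstrap with data checksums and post-init SQL
+              # references, per the schema above. The Secret/ConfigMap names and
+              # keys are hypothetical; Secrets are processed before ConfigMaps.
+              #
+              #   spec:
+              #     bootstrap:
+              #       initdb:
+              #         database: app
+              #         owner: app
+              #         dataChecksums: true
+              #         postInitApplicationSQLRefs:
+              #           secretRefs:
+              #           - name: app-seed-secret
+              #             key: seed.sql
+              #           configMapRefs:
+              #           - name: app-schema
+              #             key: schema.sql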
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
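+              # Sketch: point-in-time recovery (PITR) from an object-store backup
+              # through an external cluster, per the `recovery` schema above. The
+              # source name, bucket path, and timestamp are hypothetical.
+              #
+              #   spec:
+              #     bootstrap:
+              #       recovery:
+              #         source: cluster-origin
+              #         recoveryTarget:
+              #           targetTime: "2024-05-01T10:00:00Z"
+              #     externalClusters:
+              #     - name: cluster-origin
+              #       barmanObjectStore:
+              #         destinationPath: s3://my-backups/cluster-origin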
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
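+              # Sketch: user-provided certificates, per the fields above. The
+              # secret names are hypothetical; each secret must carry the keys
+              # listed in the corresponding description (`ca.crt`, `ca.key`,
+              # `tls.crt`, `tls.key`).
+              #
+              #   spec:
+              #     certificates:
+              #       serverCASecret: my-server-ca
+              #       serverTLSSecret: my-server-tls
+              #       clientCASecret: my-client-ca
+              #       replicationTLSSecret: my-replication-tls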
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1".
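+              # Sketch: passing environment variables to the instance pods via the
+              # `env` schema above. The variable values and the `proxy-config`
+              # secret are hypothetical.
+              #
+              #   spec:
+              #     env:
+              #     - name: TZ
+              #       value: Europe/Rome
+              #     - name: HTTP_PROXY
+              #       valueFrom:
+              #         secretKeyRef:
+              #           name: proxy-config
+              #           key: httpProxy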
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim.
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + explicitly providing the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backup files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution.
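+              # Sketch: capping the ephemeral volumes with the size limits defined
+              # earlier in this section; the quantities shown are arbitrary
+              # examples.
+              #
+              #   spec:
+              #     ephemeralVolumesSizeLimit:
+              #       shm: 256Mi
+              #       temporaryData: 1Gi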
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (e.g. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + defaults to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + explicitly providing the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution.
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + x-kubernetes-validations: + - message: Major is immutable + rule: self == oldSelf + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
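+ # A minimal sketch of an `imageCatalogRef` satisfying the validation
+ # rules above; the catalog name `postgresql` and the major version
+ # are hypothetical:
+ #   imageCatalogRef:
+ #     apiGroup: postgresql.cnpg.io
+ #     kind: ImageCatalog
+ #     name: postgresql
+ #     major: 16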
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ inheritedMetadata:
+ description: Metadata that will be inherited by all objects related
+ to the Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ instances:
+ default: 1
+ description: Number of instances required in the cluster
+ minimum: 1
+ type: integer
+ livenessProbeTimeout:
+ description: |-
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ to successfully respond to the liveness probe (default 30).
+ The Liveness probe failure threshold is derived from this value using the formula:
+ ceiling(livenessProbe / 10).
+ format: int32
+ type: integer
+ logLevel:
+ default: info
+ description: 'The instances'' log level, one of the following values:
+ error, warning, info (default), debug, trace'
+ enum:
+ - error
+ - warning
+ - info
+ - debug
+ - trace
+ type: string
+ managed:
+ description: The configuration that is used by the portions of PostgreSQL
+ that are managed by the instance manager
+ properties:
+ roles:
+ description: Database roles managed by the `Cluster`
+ items:
+ description: |-
+ RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+ with the additional field Ensure specifying whether to ensure the presence or
+ absence of the role in the database
+
+ The defaults of the CREATE ROLE command are applied
+ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+ properties:
+ bypassrls:
+ description: |-
+ Whether a role bypasses every row-level security (RLS) policy.
+ Default is `false`.
+ type: boolean
+ comment:
+ description: Description of the role
+ type: string
+ connectionLimit:
+ default: -1
+ description: |-
+ If the role can log in, this specifies how many concurrent
+ connections the role can make. `-1` (the default) means no limit.
+ format: int64
+ type: integer
+ createdb:
+ description: |-
+ When set to `true`, the role being defined will be allowed to create
+ new databases. Specifying `false` (default) will deny a role the
+ ability to create databases.
+ type: boolean
+ createrole:
+ description: |-
+ Whether the role will be permitted to create, alter, drop, comment
+ on, change the security label for, and grant or revoke membership in
+ other roles. Default is `false`.
+ type: boolean
+ disablePassword:
+ description: DisablePassword indicates that a role's password
+ should be set to NULL in Postgres
+ type: boolean
+ ensure:
+ default: present
+ description: Ensure the role is `present` or `absent` -
+ defaults to "present"
+ enum:
+ - present
+ - absent
+ type: string
+ inRoles:
+ description: |-
+ List of one or more existing roles to which this role will be
+ immediately added as a new member. Default empty.
+ items:
+ type: string
+ type: array
+ inherit:
+ default: true
+ description: |-
+ Whether a role "inherits" the privileges of roles it is a member of.
+ Default is `true`.
+ type: boolean
+ login:
+ description: |-
+ Whether the role is allowed to log in. A role having the `login`
+ attribute can be thought of as a user. Roles without this attribute
+ are useful for managing database privileges, but are not users in
+ the usual sense of the word. Default is `false`.
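+ # A minimal sketch of a managed role using the attributes above; the
+ # role name and secret name are hypothetical:
+ #   managed:
+ #     roles:
+ #       - name: app_reader
+ #         ensure: present
+ #         login: true
+ #         inherit: true
+ #         connectionLimit: 10
+ #         passwordSecret:
+ #           name: app-reader-password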
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present)
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+ superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ services:
+ description: Services managed by the `Cluster`
+ properties:
+ additional:
+ description: Additional is a list of additional managed services
+ specified by the user.
+ items:
+ description: |-
+ ManagedService represents a specific service managed by the cluster.
+ It includes the type of service and its associated template specification.
+ properties:
+ selectorType:
+ description: |-
+ SelectorType specifies the type of selectors that the service will have.
+ Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ serviceTemplate:
+ description: ServiceTemplate is the template specification
+ for the service.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only
+ supported for certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true".
It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information
+ on service's port.
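+ # A minimal sketch of an `additional` managed service wrapping this
+ # ServiceSpec in a serviceTemplate; the service name is hypothetical:
+ #   managed:
+ #     services:
+ #       additional:
+ #         - selectorType: rw
+ #           serviceTemplate:
+ #             metadata:
+ #               name: cluster-example-rw-lb
+ #             spec:
+ #               type: LoadBalancer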
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: string
+ type: object
+ type: object
+ updateStrategy:
+ default: patch
+ description: UpdateStrategy describes how the service
+ differences should be reconciled
+ enum:
+ - patch
+ - replace
+ type: string
+ required:
+ - selectorType
+ - serviceTemplate
+ type: object
+ type: array
+ disabledDefaultServices:
+ description: |-
+ DisabledDefaultServices is a list of service types that are disabled by default.
+ Valid values are "r" and "ro", representing read and read-only services.
+ items:
+ description: |-
+ ServiceSelectorType describes a valid value for generating the service selectors.
+ It indicates which type of service the selector applies to, such as read-write, read, or read-only
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ type: array
+ type: object
+ type: object
+ maxSyncReplicas:
+ default: 0
+ description: |-
+ The target value for the synchronous replication quorum, which can be
+ decreased if the number of ready standbys is lower than this.
+ Undefined or 0 disables synchronous replication.
+ minimum: 0
+ type: integer
+ minSyncReplicas:
+ default: 0
+ description: |-
+ Minimum number of instances required in synchronous replication with the
+ primary. Undefined or 0 allows writes to complete when no standby is
+ available.
+ minimum: 0
+ type: integer
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this cluster
+ properties:
+ customQueriesConfigMap:
+ description: The list of config maps containing the custom queries
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ customQueriesSecret:
+ description: The list of secrets containing the custom queries
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ disableDefaultQueries:
+ default: false
+ description: |-
+ Whether the default queries should be injected.
+ Set it to `true` if you don't want to inject default queries into the cluster.
+ Default: false.
+ type: boolean
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
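+ # A minimal sketch of a PodMonitor relabeling using the RelabelConfig
+ # fields above; the label names are hypothetical:
+ #   monitoring:
+ #     enablePodMonitor: true
+ #     podMonitorMetricRelabelings:
+ #       - action: replace
+ #         sourceLabels: ["cluster"]
+ #         targetLabel: cnpg_cluster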
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ tls:
+ description: |-
+ Configure TLS communication for the metrics endpoint.
+ Changing tls.enabled option will force a rollout of all instances.
+ properties:
+ enabled:
+ default: false
+ description: |-
+ Enable TLS for the monitoring endpoint.
+ Changing this option will force a rollout of all instances.
+ type: boolean
+ type: object
+ type: object
+ nodeMaintenanceWindow:
+ description: Define a maintenance window for the Kubernetes nodes
+ properties:
+ inProgress:
+ default: false
+ description: Is there a node maintenance activity in progress?
+ type: boolean
+ reusePVC:
+ default: true
+ description: |-
+ Reuse the existing PVC (wait for the node to come
+ up again) or not (recreate it elsewhere - when `instances` >1)
+ type: boolean
+ type: object
+ plugins:
+ description: |-
+ The plugins configuration, containing
+ any plugin to be loaded with the corresponding configuration
+ items:
+ description: |-
+ PluginConfiguration specifies a plugin that needs to be loaded for this
+ cluster to be reconciled
+ properties:
+ enabled:
+ default: true
+ description: Enabled is true if this plugin will be used
+ type: boolean
+ name:
+ description: Name is the plugin name
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is the configuration of the plugin
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ postgresGID:
+ default: 26
+ description: The GID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresUID:
+ default: 26
+ description: The UID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresql:
+ description: Configuration of the PostgreSQL server
+ properties:
+ enableAlterSystem:
+ description: |-
+ If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+ on this CloudNativePG Cluster.
+ This should only be used for debugging and troubleshooting.
+ Defaults to false.
+ type: boolean
+ ldap:
+ description: Options to specify LDAP configuration
+ properties:
+ bindAsAuth:
+ description: Bind as authentication configuration
+ properties:
+ prefix:
+ description: Prefix for the bind authentication option
+ type: string
+ suffix:
+ description: Suffix for the bind authentication option
+ type: string
+ type: object
+ bindSearchAuth:
+ description: Bind+Search authentication configuration
+ properties:
+ baseDN:
+ description: Root DN to begin the user search
+ type: string
+ bindDN:
+ description: DN of the user to bind to the directory
+ type: string
+ bindPassword:
+ description: Secret with the password for the user to
+ bind to the directory
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ searchAttribute:
+ description: Attribute to match against the username
+ type: string
+ searchFilter:
+ description: Search filter to use when doing the search+bind
+ authentication
+ type: string
+ type: object
+ port:
+ description: LDAP server port
+ type: integer
+ scheme:
+ description: LDAP scheme to be used, possible options are
+ `ldap` and `ldaps`
+ enum:
+ - ldap
+ - ldaps
+ type: string
+ server:
+ description: LDAP hostname or IP address
+ type: string
+ tls:
+ description: Set to 'true' to enable LDAP over TLS. 'false'
+ is default
+ type: boolean
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: PostgreSQL configuration options (postgresql.conf)
+ type: object
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ pg_ident:
+ description: |-
+ PostgreSQL User Name Maps rules (lines to be appended
+ to the pg_ident.conf file)
+ items:
+ type: string
+ type: array
+ promotionTimeout:
+ description: |-
+ Specifies the maximum number of seconds to wait when promoting an instance to primary.
+ Default value is 40000000, greater than one year in seconds,
+ big enough to simulate an infinite timeout
+ format: int32
+ type: integer
+ shared_preload_libraries:
+ description: List of shared preload libraries to add to the default
+ ones
+ items:
+ type: string
+ type: array
+ syncReplicaElectionConstraint:
+ description: |-
+ Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+ set up.
+ properties:
+ enabled:
+ description: This flag enables the constraints for sync replicas
+ type: boolean
+ nodeLabelsAntiAffinity:
+ description: A list of node label values to extract and compare
+ to evaluate if the pods reside in the same topology or not
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ synchronous:
+ description: Configuration of the PostgreSQL synchronous replication
+ feature
+ properties:
+ dataDurability:
+ default: required
+ description: |-
+ If set to "required", data durability is strictly enforced. Write operations
+ with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+ block if there are insufficient healthy replicas, ensuring data persistence.
+ If set to "preferred", data durability is maintained when healthy replicas
+ are available, but the required number of instances will adjust dynamically
+ if replicas become unavailable. This setting relaxes strict durability enforcement
+ to allow for operational continuity. This setting is only applicable if both
+ `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+ enum:
+ - required
+ - preferred
+ type: string
+ maxStandbyNamesFromCluster:
+ description: |-
+ Specifies the maximum number of local cluster pods that can be
+ automatically included in the `synchronous_standby_names` option in
+ PostgreSQL.
+ type: integer
+ method:
+ description: |-
+ Method to select synchronous replication standbys from the listed
+ servers, accepting 'any' (quorum-based synchronous replication) or
+ 'first' (priority-based synchronous replication) as values.
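+ # A minimal sketch of quorum-based synchronous replication using the
+ # fields above (`method`, `number`, `dataDurability`); values are
+ # illustrative only:
+ #   postgresql:
+ #     synchronous:
+ #       method: any
+ #       number: 1
+ #       dataDurability: required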
+ enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. 
If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
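For context, these projected-volume sources are typically consumed through the Cluster's `projectedVolumeTemplate`. A minimal, illustrative sketch (the ConfigMap name, key, and audience are hypothetical, not taken from this schema):

# Illustrative sketch only — not part of the generated CRD.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  projectedVolumeTemplate:
    sources:
      - configMap:
          name: app-settings            # hypothetical ConfigMap
          items:
            - key: app.properties       # hypothetical key
              path: app.properties
      - serviceAccountToken:
          audience: my-audience         # hypothetical audience
          expirationSeconds: 3600       # must be at least 600 (10 minutes)
          path: token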
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. A replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica clusters, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`.
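Read together, the `replica` and `replicationSlots` stanzas above can be combined as in this sketch (the source cluster name and exclude pattern are hypothetical):

# Illustrative sketch only — not part of the generated CRD.
spec:
  replica:
    enabled: true
    source: cluster-eu          # hypothetical entry in externalClusters
    minApplyDelay: 1h
  replicationSlots:
    highAvailability:
      enabled: true
      slotPrefix: _cnpg_
    synchronizeReplicas:
      enabled: true
      excludePatterns:
        - "^manual_"            # hypothetical slot-name pattern
    updateInterval: 30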
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
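The pod-level settings above (seccomp profile, generated service account, and the startup/shutdown windows) compose as in this sketch (the annotation key and value are hypothetical):

# Illustrative sketch only — not part of the generated CRD.
spec:
  seccompProfile:
    type: RuntimeDefault
  serviceAccountTemplate:
    metadata:
      annotations:
        example.com/role: pg-cluster   # hypothetical annotation
  startDelay: 3600
  stopDelay: 1800
  smartShutdownTimeout: 180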
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
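The dataSource/dataSourceRef fields described above let a new volume be pre-populated, for instance from a VolumeSnapshot; a sketch (the snapshot name is hypothetical):

# Illustrative sketch only — not part of the generated CRD.
spec:
  storage:
    pvcTemplate:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi
      dataSource:
        apiGroup: snapshot.storage.k8s.io
        kind: VolumeSnapshot
        name: cluster-snap   # hypothetical snapshot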
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined, a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group.
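The storage, superuserSecret, and switchoverDelay fields above combine as in this sketch (the secret name, size, and storage class are hypothetical):

# Illustrative sketch only — not part of the generated CRD.
spec:
  storage:
    size: 10Gi
    storageClass: standard      # hypothetical storage class
    resizeInUseVolumes: true
  superuserSecret:
    name: cluster-superuser     # hypothetical pre-created secret
  switchoverDelay: 3600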
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs.
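The tablespace fields above (name, owner, storage, temporary) can be declared as in this sketch (names, owner, and sizes are hypothetical):

# Illustrative sketch only — not part of the generated CRD.
spec:
  tablespaces:
    - name: analytics           # hypothetical tablespace
      owner:
        name: app               # hypothetical PostgreSQL role
      storage:
        size: 5Gi
    - name: scratch             # hypothetical temporary tablespace
      temporary: true
      storage:
        size: 2Gi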
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to look up values from the + incoming pod labels; those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys is equal to or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, a new pod with the same labelSelector cannot be scheduled, + because the computed skew will be 3(3 - 0) if the new Pod is scheduled to any of the three zones, + which would violate MaxSkew.
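The spread-constraint fields above (maxSkew, minDomains), together with topologyKey and whenUnsatisfiable described next, are typically used to balance instances across zones; a sketch (the selector assumes the operator's per-cluster pod label):

# Illustrative sketch only — not part of the generated CRD.
spec:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          cnpg.io/cluster: cluster-example   # assumed cluster label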
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
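The walStorage section being described here places WAL files on a volume of their own, separate from PGDATA; a sketch (sizes and class are hypothetical):

# Illustrative sketch only — not part of the generated CRD.
spec:
  storage:
    size: 20Gi
  walStorage:
    size: 5Gi
    storageClass: fast-ssd   # hypothetical storage class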
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + azurePVCUpdateEnabled: + description: AzurePVCUpdateEnabled shows if the PVC online upgrade + is enabled for this cluster + type: boolean + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults.
+ properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
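Although reported here under `status`, the same certificate fields are user-configurable under `spec.certificates`; a sketch with pre-created secrets (all names are hypothetical):

# Illustrative sketch only — not part of the generated CRD.
spec:
  certificates:
    serverCASecret: my-server-ca        # hypothetical secret
    serverTLSSecret: my-server-tls      # hypothetical secret
    clientCASecret: my-client-ca        # hypothetical secret
    replicationTLSSecret: my-repl-tls   # hypothetical secret
    serverAltDNSNames:
      - pg.example.com                  # hypothetical DNS name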
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash of the operator that is running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics.
Map keys are the config map names; map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy. + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs that are neither dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods.
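For orientation, a condensed example of how the operator might populate the read-only status fields above (all values are hypothetical):

# Illustrative sketch only — status is written by the operator, never by the user.
status:
  currentPrimary: cluster-example-1
  instances: 3
  instanceNames:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  phase: Cluster in healthy state       # assumed phase wording
  firstRecoverabilityPoint: "2025-01-01T00:00:00Z"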
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler 
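+ # Example (illustrative values only): the shape of the instancesReportedState
+ # map described above, keyed by instance name:
+ #
+ #   instancesReportedState:
+ #     cluster-example-1:
+ #       isPrimary: true
+ #       timeLineID: 1
+ #     cluster-example-2:
+ #       isPrimary: false
+ #       timeLineID: 1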
+ items:
+ type: string
+ type: array
+ restoreJobHookCapabilities:
+ description: |-
+ RestoreJobHookCapabilities are the list of capabilities of the
+ plugin regarding the RestoreJobHook management
+ items:
+ type: string
+ type: array
+ status:
+ description: Status contains the status reported by the plugin
+ through the SetStatusInCluster interface
+ type: string
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have the ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance; this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+ description: Current write pod
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: databases.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Database
+ listKind: DatabaseList
+ plural: databases
+ singular: database
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Database is the Schema for the databases API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. 
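+ # Example (illustrative, all names are placeholders): a minimal Database
+ # manifest exercising the fields above, including the ICU locale options:
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Database
+ #   metadata:
+ #     name: db-app
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     name: app
+ #     owner: app
+ #     encoding: UTF8
+ #     localeProvider: icu
+ #     icuLocale: en-US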
+ type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
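+ # Example (illustrative image tags): an ImageCatalog with one entry per
+ # PostgreSQL major version, as the uniqueness rule below requires:
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql
+ #   spec:
+ #     images:
+ #     - major: 16
+ #       image: ghcr.io/cloudnative-pg/postgresql:16.6
+ #     - major: 17
+ #       image: ghcr.io/cloudnative-pg/postgresql:17.2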
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
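+ # Example (illustrative): a relabeling entry that copies the Prometheus meta
+ # label __meta_kubernetes_pod_node_name into a node label on scraped targets:
+ #
+ #   podMonitorRelabelings:
+ #   - action: replace
+ #     sourceLabels: [__meta_kubernetes_pod_node_name]
+ #     targetLabel: node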
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
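+ # Example (illustrative; parameter names follow upstream PgBouncer
+ # configuration): a pgbouncer section combining the options above:
+ #
+ #   pgbouncer:
+ #     poolMode: transaction
+ #     parameters:
+ #       max_client_conn: "1000"
+ #       default_pool_size: "10"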
+ type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. 
If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. 
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
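+ # Example (illustrative; the app=my-app label is a placeholder): a weighted
+ # term that prefers scheduling next to matching pods on the same node:
+ #
+ #   podAffinity:
+ #     preferredDuringSchedulingIgnoredDuringExecution:
+ #     - weight: 100
+ #       podAffinityTerm:
+ #         topologyKey: kubernetes.io/hostname
+ #         labelSelector:
+ #           matchLabels:
+ #             app: my-app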
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information see https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
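+                  # Illustrative sketch (comment only): a startupProbe as described above
+                  # allows up to failureThreshold * periodSeconds for slow initialization
+                  # before liveness checks take over. Path, port, and values are placeholders:
+                  #   startupProbe:
+                  #     httpGet:
+                  #       path: /startup
+                  #       port: 8080
+                  #     periodSeconds: 10
+                  #     failureThreshold: 30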
+                              format: int32
+                              type: integer
+                            tcpSocket:
+                              description: TCPSocket specifies an action involving
+                                a TCP port.
+                              properties:
+                                host:
+                                  description: 'Optional: Host name to connect
+                                    to, defaults to the pod IP.'
+                                  type: string
+                                port:
+                                  anyOf:
+                                  - type: integer
+                                  - type: string
+                                  description: |-
+                                    Number or name of the port to access on the container.
+                                    Number must be in the range 1 to 65535.
+                                    Name must be an IANA_SVC_NAME.
+                                  x-kubernetes-int-or-string: true
+                              required:
+                              - port
+                              type: object
+                            terminationGracePeriodSeconds:
+                              description: |-
+                                Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                                The grace period is the duration in seconds after the processes running in the pod are sent
+                                a termination signal and the time when the processes are forcibly halted with a kill signal.
+                                Set this value longer than the expected cleanup time for your process.
+                                If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                                value overrides the value provided by the pod spec.
+                                Value must be a non-negative integer. The value zero indicates stop immediately via
+                                the kill signal (no opportunity to shut down).
+                                This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                                Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                              format: int64
+                              type: integer
+                            timeoutSeconds:
+                              description: |-
+                                Number of seconds after which the probe times out.
+                                Defaults to 1 second. Minimum value is 1.
+                                More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                              format: int32
+                              type: integer
+                          type: object
+                        stdin:
+                          description: |-
+                            Whether this container should allocate a buffer for stdin in the container runtime. If this
+                            is not set, reads from stdin in the container will always result in EOF.
+                            Default is false.
+                          type: boolean
+                        stdinOnce:
+                          description: |-
+                            Whether the container runtime should close the stdin channel after it has been opened by
+                            a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                            sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                            first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                            at which time stdin is closed and remains closed until the container is restarted. If this
+                            flag is false, a container process that reads from stdin will never receive an EOF.
+                            Default is false.
+                          type: boolean
+                        terminationMessagePath:
+                          description: |-
+                            Optional: Path at which the file to which the container's termination message
+                            will be written is mounted into the container's filesystem.
+                            Message written is intended to be brief final status, such as an assertion failure message.
+                            Will be truncated by the node if greater than 4096 bytes. The total message length across
+                            all containers will be limited to 12kb.
+                            Defaults to /dev/termination-log.
+                            Cannot be updated.
+                          type: string
+                        terminationMessagePolicy:
+                          description: |-
+                            Indicate how the termination message should be populated. File will use the contents of
+                            terminationMessagePath to populate the container status message on both success and failure.
+                            FallbackToLogsOnError will use the last chunk of container log output if the termination
+                            message file is empty and the container exited with an error.
+                            The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                            Defaults to File.
+                            Cannot be updated.
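+                  # Illustrative sketch (comment only): the termination-message fields
+                  # above combine as follows; the path shown is the documented default,
+                  # while FallbackToLogsOnError opts into log-derived status messages:
+                  #   terminationMessagePath: /dev/termination-log
+                  #   terminationMessagePolicy: FallbackToLogsOnError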
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
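+                  # Illustrative sketch (comment only): a volumeMount as described above,
+                  # using the optional subPath and readOnly fields; names and paths are
+                  # placeholders, and `name` must match a volume declared in the pod:
+                  #   volumeMounts:
+                  #   - name: config
+                  #     mountPath: /etc/app
+                  #     subPath: app.conf
+                  #     readOnly: true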
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. 
Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
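+                  # Illustrative sketch (comment only): env entries using the valueFrom
+                  # sources documented above; the ConfigMap name and key are placeholders:
+                  #   env:
+                  #   - name: POD_NAME
+                  #     valueFrom:
+                  #       fieldRef:
+                  #         fieldPath: metadata.name
+                  #   - name: LOG_LEVEL
+                  #     valueFrom:
+                  #       configMapKeyRef:
+                  #         name: app-config
+                  #         key: log-level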
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. 
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
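+                  # Illustrative sketch (comment only): a postStart hook as described
+                  # above, using the exec handler; the command is a placeholder and is
+                  # exec'd directly, without a shell, unless one is invoked explicitly:
+                  #   lifecycle:
+                  #     postStart:
+                  #       exec:
+                  #         command: ["/bin/sh", "-c", "echo started > /tmp/marker"]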
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
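+                  # Illustrative sketch (comment only): a preStop hook as described above,
+                  # using the sleep handler to delay shutdown so connections can drain;
+                  # the duration is a placeholder and counts against the grace period:
+                  #   lifecycle:
+                  #     preStop:
+                  #       sleep:
+                  #         seconds: 10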
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
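+                  # Illustrative sketch (comment only): ephemeral containers are added via
+                  # the pod's ephemeralcontainers subresource (for example `kubectl debug`),
+                  # not by editing the pod spec; image and names are placeholders:
+                  #   ephemeralContainers:
+                  #   - name: debugger
+                  #     image: busybox:1.36
+                  #     command: ["sh"]
+                  #     stdin: true
+                  #     tty: true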
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
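+                  # Illustrative sketch (comment only): a restrictive securityContext
+                  # combining the fields above; these are common hardening choices, not
+                  # defaults enforced by the schema:
+                  #   securityContext:
+                  #     runAsNonRoot: true
+                  #     allowPrivilegeEscalation: false
+                  #     capabilities:
+                  #       drop: ["ALL"]
+                  #     seccompProfile:
+                  #       type: RuntimeDefault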
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. 
+ + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. 
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
+ If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated.
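+ # Illustrative sketch of the env list described above, using the value and
+ # valueFrom variants defined in the schema that follows; names and values
+ # are hypothetical.
+ #   env:
+ #   - name: APP_MODE
+ #     value: "init"
+ #   - name: POD_NAMESPACE
+ #     valueFrom:
+ #       fieldRef:
+ #         fieldPath: metadata.namespace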
+ items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. 
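+ # Hedged example of a postStart exec handler as described above; the
+ # command is hypothetical. If the handler fails, the container is
+ # terminated and restarted according to its restart policy.
+ #   lifecycle:
+ #     postStart:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "echo started > /tmp/started"]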
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. 
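+ # Hedged example of a preStop handler used for graceful shutdown; the
+ # sleep duration is hypothetical. Note that the pod's termination grace
+ # period countdown has already begun when this hook runs.
+ #   lifecycle:
+ #     preStop:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "sleep 5"]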
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
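+ # Sketch of a liveness probe using the exec action defined here; the
+ # command and thresholds are hypothetical. Exit status 0 counts as
+ # healthy, non-zero as unhealthy, and a failing probe restarts the
+ # container.
+ #   livenessProbe:
+ #     exec:
+ #       command: ["pg_isready", "-q"]
+ #     periodSeconds: 10
+ #     failureThreshold: 3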
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
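+ # Sketch of a readiness probe (path and port are hypothetical). Unlike a
+ # liveness probe, a failing readiness probe removes the container from
+ # Service endpoints instead of restarting it.
+ #   readinessProbe:
+ #     httpGet:
+ #       path: /readyz
+ #       port: 8080
+ #     periodSeconds: 10
+ #     successThreshold: 1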
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. 
+ Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
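+ # Hedged sketch combining the hardening fields described above; all
+ # values are hypothetical.
+ #   securityContext:
+ #     allowPrivilegeEscalation: false
+ #     readOnlyRootFilesystem: true
+ #     privileged: false
+ #     capabilities:
+ #       drop: ["ALL"]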
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
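+ # Sketch of a gRPC startup probe per the fields above; the port and
+ # service name are hypothetical. The service name, if set, is placed in
+ # the gRPC HealthCheckRequest.
+ #   startupProbe:
+ #     grpc:
+ #       port: 9090
+ #       service: "example.health.v1"
+ #     failureThreshold: 30
+ #     periodSeconds: 10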
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
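+ # Worked timing note: with the hypothetical values failureThreshold: 30
+ # and periodSeconds: 10, a slow-starting container gets up to
+ # 30 x 10 = 300 seconds to succeed before the startup probe is considered
+ # failed and the container is restarted, just as with a failed liveness
+ # probe.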
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. 
+ + If the OS field is set to linux, the following fields must be unset: + - securityContext.windowsOptions + + If the OS field is set to windows, the following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional values may be defined in the future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name.
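Aside: os, overhead, and the priority fields rarely need to be set together; a hedged pod-template fragment with plausible values (the PriorityClass name is assumed to exist in the cluster):

    os:
      name: linux
    priorityClassName: db-critical            # hypothetical PriorityClass
    preemptionPolicy: PreemptLowerPriority    # the default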
+ If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. 
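Aside: resourceClaims is the dynamic-resource-allocation hook: each entry names either a ResourceClaim or a ResourceClaimTemplate, never both. A sketch with hypothetical object names, assuming the DynamicResourceAllocation feature gate is enabled and a matching RuntimeClass exists:

    resourceClaims:
      - name: gpu                                     # referenced by containers via resources.claims
        resourceClaimTemplateName: gpu-claim-template # hypothetical template
    restartPolicy: Always                             # the default
    runtimeClassName: gvisor                          # assumes such a RuntimeClass exists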
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. 
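Aside: pod-level securityContext values apply to every container unless a container overrides them, and schedulingGates hold the pod in SchedulingGated until an external controller removes them. A minimal sketch of the fields above (the gate name is made up):

    schedulingGates:
      - name: example.com/quota-check       # hypothetical gate removed by an external controller
    securityContext:
      fsGroup: 2000                         # volumes become group-owned by GID 2000
      fsGroupChangePolicy: OnRootMismatch   # skip recursive chown when ownership already matches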
+ format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. 
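Aside: the UID and seccomp fields typically travel together in a restricted profile. A hedged example (the UID and group are arbitrary):

    securityContext:
      runAsNonRoot: true
      runAsUser: 10001              # illustrative non-root UID
      seccompProfile:
        type: RuntimeDefault        # container runtime's default profile
      supplementalGroups:
        - 3000                      # extra group for the first process in each container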
+ Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. 
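Aside: sysctls only accepts namespaced kernel parameters, and the kubelet must be configured to allow them, so treat this as a cautious sketch (the ServiceAccount name is hypothetical):

    securityContext:
      sysctls:
        - name: net.ipv4.tcp_keepalive_time   # namespaced; the node must allow it
          value: "600"                        # values are strings
    serviceAccountName: pooler-sa             # hypothetical ServiceAccount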
+ When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed.
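Aside: tolerations follow the standard taint-matching semantics described above. For instance (the taint key and value are made up):

    tolerations:
      - key: dedicated                  # hypothetical node taint
        operator: Equal
        value: database
        effect: NoSchedule
      - key: node.kubernetes.io/not-ready
        operator: Exists
        effect: NoExecute
        tolerationSeconds: 300          # evict after 5 minutes instead of immediately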
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. 
+ And when the number of eligible domains with matching topology keys is equal to or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
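Aside: putting the constraint fields together, a common zone-spreading sketch (the app label is illustrative, not set by this CRD):

    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: pgbouncer              # hypothetical pod label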
+ type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. 
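Aside: configMap volumes project keys as files; modes are easiest to write in octal in YAML. A sketch with a made-up ConfigMap name:

    volumes:
      - name: app-config
        configMap:
          name: pooler-config           # hypothetical ConfigMap
          defaultMode: 0440             # octal in YAML; JSON clients must use 288
          items:
            - key: settings.ini
              path: settings.ini        # unlisted keys are not projected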
+ type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long).
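Aside: emptyDir and downwardAPI are the two self-contained volume types in this list. A short sketch combining them (volume names are made up):

    volumes:
      - name: scratch
        emptyDir:
          medium: Memory                # tmpfs; counts against container memory limits
          sizeLimit: 256Mi
      - name: podinfo
        downwardAPI:
          items:
            - path: labels
              fieldRef:
                fieldPath: metadata.labels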
+ + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts.
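Aside: an ephemeral volume is just an inline PVC template whose lifetime is tied to the pod. A hedged sketch (the StorageClass name is assumed to exist):

    volumes:
      - name: cache
        ephemeral:
          volumeClaimTemplate:
            spec:
              accessModes: ["ReadWriteOnce"]
              storageClassName: fast-ssd    # hypothetical StorageClass
              resources:
                requests:
                  storage: 1Gi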
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. 
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
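+ # Illustrative example, not part of the generated schema: a Publication
+ # that publishes one table plus every table in a schema. All names here
+ # are hypothetical; note that target.allTables and target.objects are
+ # mutually exclusive, as the validation rules below enforce.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Publication
+ #   metadata:
+ #     name: pub-example
+ #   spec:
+ #     cluster:
+ #       name: cluster-example      # the "publisher" cluster
+ #     dbname: app
+ #     name: app_pub                # publication name inside PostgreSQL
+ #     target:
+ #       objects:
+ #         - table:
+ #             schema: public
+ #             name: orders
+ #         - tablesInSchema: sales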
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
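+ # Illustrative example, not part of the generated schema: a minimal
+ # ScheduledBackup that owns the Backup objects it creates. All names
+ # here are hypothetical; the six-field cron expression (with a leading
+ # seconds specifier) follows the schedule format described below.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ScheduledBackup
+ #   metadata:
+ #     name: daily-backup
+ #   spec:
+ #     schedule: "0 0 0 * * *"      # sec min hour day month weekday
+ #     backupOwnerReference: self
+ #     cluster:
+ #       name: cluster-example
+ #     method: barmanObjectStore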
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: Whether the first backup has to start immediately
+ after creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`).
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: Whether this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups always run on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup.
This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: The last time a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: The next time a backup will run
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+ The name of the database where the subscription will be installed in
+ the "subscriber" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ externalClusterName:
+ description: The name of the external cluster with the publication
+ ("publisher")
+ type: string
+ name:
+ description: The name of the subscription inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Subscription parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE SUBSCRIPTION` command
+ type: object
+ publicationDBName:
+ description: |-
+ The name of the database containing the publication on the external
+ cluster. Defaults to the one in the external cluster definition.
+ type: string
+ publicationName:
+ description: |-
+ The name of the publication inside the PostgreSQL database in the
+ "publisher"
+ type: string
+ subscriptionReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this subscription
+ enum:
+ - delete
+ - retain
+ type: string
+ required:
+ - cluster
+ - dbname
+ - externalClusterName
+ - name
+ - publicationName
+ type: object
+ status:
+ description: SubscriptionStatus defines the observed state of Subscription
+ properties:
+ applied:
+ description: Applied is true if the subscription was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: 
ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: 
/validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.25.0.yaml b/releases/cnpg-1.25.0.yaml new file mode 100644 index 0000000000..cbdfc4162b --- /dev/null +++ b/releases/cnpg-1.25.0.yaml @@ -0,0 +1,17771 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: Backup is the Schema for the backups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. 
+ enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required by the S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment; + defaults to false. 
+ type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use role-based authentication without explicitly + providing the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3; the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
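+            # The affinity stanza above maps directly onto a Cluster manifest. A minimal
+            # sketch (cluster name, label keys and taint values are assumptions) combining
+            # pod anti-affinity, node selection and tolerations:
+            #
+            #   apiVersion: postgresql.cnpg.io/v1
+            #   kind: Cluster
+            #   metadata:
+            #     name: cluster-example
+            #   spec:
+            #     instances: 3
+            #     affinity:
+            #       enablePodAntiAffinity: true
+            #       podAntiAffinityType: required
+            #       topologyKey: topology.kubernetes.io/zone
+            #       nodeSelector:
+            #         workload: postgres
+            #       tolerations:
+            #         - key: postgres
+            #           operator: Exists
+            #           effect: NoSchedule
+            #
+            # With podAntiAffinityType set to "required", instances stay Pending when no
+            # node satisfies the rule, as the podAntiAffinityType description below warns.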
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. 
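+          # Putting the barmanObjectStore fields together: a sketch of an S3-compatible
+          # backup configuration (bucket path, endpoint and secret names are assumptions):
+          #
+          #   spec:
+          #     backup:
+          #       retentionPolicy: "30d"
+          #       target: prefer-standby
+          #       barmanObjectStore:
+          #         destinationPath: s3://backups/cluster-example
+          #         endpointURL: https://s3.example.com
+          #         s3Credentials:
+          #           accessKeyId:
+          #             name: aws-creds
+          #             key: ACCESS_KEY_ID
+          #           secretAccessKey:
+          #             name: aws-creds
+          #             key: ACCESS_SECRET_KEY
+          #         wal:
+          #           compression: gzip
+          #           maxParallel: 4
+          #         data:
+          #           compression: gzip
+          #           jobs: 2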
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
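+          # For the volumeSnapshot method above, a sketch of an online (hot) snapshot
+          # configuration; the snapshot class name is an assumption tied to the
+          # installed CSI driver:
+          #
+          #   spec:
+          #     backup:
+          #       volumeSnapshot:
+          #         className: csi-snapclass
+          #         online: true
+          #         onlineConfiguration:
+          #           immediateCheckpoint: true
+          #           waitForArchive: true
+          #         snapshotOwnerReference: backup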
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. 
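+              # The import block above drives logical migrations via pg_dump/pg_restore.
+              # A sketch of a microservice-type import; the external cluster name
+              # "source-db" is an assumption, and its connection parameters must be
+              # defined in the externalClusters section of the spec:
+              #
+              #   spec:
+              #     bootstrap:
+              #       initdb:
+              #         database: app
+              #         owner: app
+              #         dataChecksums: true
+              #         import:
+              #           type: microservice
+              #           databases:
+              #             - app
+              #           source:
+              #             externalCluster: source-db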
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
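+              # The pg_basebackup bootstrap above clones a compatible running instance
+              # over the streaming replication protocol. A sketch; the source name
+              # "origin" is an assumption and must match an externalClusters entry:
+              #
+              #   spec:
+              #     bootstrap:
+              #       pg_basebackup:
+              #         source: origin
+              #         database: app
+              #         owner: app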
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
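+              # A point-in-time recovery sketch using the recoveryTarget fields above;
+              # the source name and the timestamp are assumptions:
+              #
+              #   spec:
+              #     bootstrap:
+              #       recovery:
+              #         source: cluster-example
+              #         recoveryTarget:
+              #           targetTime: "2024-05-01T10:00:00Z"
+              #           exclusive: false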
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
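+          # Bringing your own PKI with the fields described above: a sketch in which
+          # all four secrets are pre-provisioned (the secret names are assumptions):
+          #
+          #   spec:
+          #     certificates:
+          #       serverCASecret: server-ca
+          #       serverTLSSecret: server-tls
+          #       clientCASecret: client-ca
+          #       replicationTLSSecret: replication-tls
+          #       serverAltDNSNames:
+          #         - pg.example.com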
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
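+          # The enablePDB and enableSuperuserAccess toggles above are commonly flipped
+          # for development clusters: a sketch that removes the PodDisruptionBudget and
+          # re-enables the postgres superuser secret:
+          #
+          #   spec:
+          #     enablePDB: false
+          #     enableSuperuserAccess: true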
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
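+          # env and envFrom reach the PostgreSQL containers like standard pod
+          # environment variables. A sketch; the ConfigMap and Secret names are
+          # assumptions:
+          #
+          #   spec:
+          #     env:
+          #       - name: TZ
+          #         value: Europe/Rome
+          #       - name: HTTP_PROXY
+          #         valueFrom:
+          #           secretKeyRef:
+          #             name: proxy-secret
+          #             key: url
+          #     envFrom:
+          #       - configMapRef:
+          #           name: common-env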
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
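+        # Illustrative sketch (hypothetical, not part of the generated schema):
+        # how the dataSourceRef field described above could be populated to
+        # clone from a VolumeSnapshot. The snapshot name is a placeholder.
+        #
+        #   dataSourceRef:
+        #     apiGroup: snapshot.storage.k8s.io
+        #     kind: VolumeSnapshot
+        #     name: my-snapshot        # hypothetical name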
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
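+        # Hedged example of an ephemeral volumeClaimTemplate using the fields
+        # defined above; the storage class and size are assumptions, not
+        # defaults taken from the schema.
+        #
+        #   ephemeralVolumeSource:
+        #     volumeClaimTemplate:
+        #       spec:
+        #         accessModes: ["ReadWriteOnce"]
+        #         storageClassName: standard    # assumption
+        #         resources:
+        #           requests:
+        #             storage: 1Gi              # arbitrary quantity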
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
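+        # Minimal sketch of the ephemeralVolumesSizeLimit stanza defined above;
+        # the quantities are arbitrary examples that match the quantity pattern.
+        #
+        #   ephemeralVolumesSizeLimit:
+        #     shm: 256Mi
+        #     temporaryData: 1Gi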
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
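+        # Hypothetical barmanObjectStore fragment for an external cluster,
+        # combining the destinationPath, endpointURL and data fields described
+        # above; the bucket path and endpoint are placeholders.
+        #
+        #   barmanObjectStore:
+        #     destinationPath: s3://backups/pg     # placeholder
+        #     endpointURL: https://s3.example.com  # placeholder
+        #     data:
+        #       compression: gzip
+        #       jobs: 2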
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
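+        # Sketch of the s3Credentials and wal sections documented above; the
+        # secret name and keys are assumptions.
+        #
+        #   s3Credentials:
+        #     accessKeyId:
+        #       name: aws-creds          # hypothetical secret
+        #       key: ACCESS_KEY_ID
+        #     secretAccessKey:
+        #       name: aws-creds
+        #       key: SECRET_ACCESS_KEY
+        #   wal:
+        #     compression: gzip
+        #     maxParallel: 4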
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + x-kubernetes-validations: + - message: Major is immutable + rule: self == oldSelf + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`<image>:<tag>`) + and digests for deterministic and repeatable deployments + (`<image>:<tag>@sha256:<digestValue>`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent.
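+        # Hedged example of imageCatalogRef as constrained by the validation
+        # rules above (kind must be ImageCatalog or ClusterImageCatalog, the
+        # apiGroup must be postgresql.cnpg.io); the catalog name and major
+        # version are assumptions.
+        #
+        #   imageCatalogRef:
+        #     apiGroup: postgresql.cnpg.io
+        #     kind: ImageCatalog
+        #     name: postgresql          # hypothetical catalog
+        #     major: 16                 # assumption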
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
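+        # Illustrative managed role built from the RoleConfiguration fields
+        # above; the role and secret names are placeholders.
+        #
+        #   managed:
+        #     roles:
+        #       - name: app_reader            # hypothetical role
+        #         ensure: present
+        #         login: true
+        #         inRoles: ["pg_read_all_data"]
+        #         connectionLimit: 10
+        #         passwordSecret:
+        #           name: app-reader-creds    # hypothetical secret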
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature. + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port.
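+        # Sketch of an additional managed service combining selectorType and
+        # serviceTemplate as described above; the service name and the
+        # LoadBalancer type are assumptions about one possible setup.
+        #
+        #   managed:
+        #     services:
+        #       additional:
+        #         - selectorType: rw
+        #           serviceTemplate:
+        #             metadata:
+        #               name: cluster-example-rw-lb   # hypothetical name
+        #             spec:
+        #               type: LoadBalancer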
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
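+        # Hypothetical monitoring stanza showing a metric relabeling that drops
+        # a family of metrics by name, using the RelabelConfig fields above;
+        # the regex is an arbitrary example.
+        #
+        #   monitoring:
+        #     enablePodMonitor: true
+        #     podMonitorMetricRelabelings:
+        #       - sourceLabels: ["__name__"]
+        #         regex: pg_stat_bgwriter_.*   # arbitrary example
+        #         action: drop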
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                type: string
+                              optional:
+                                description: Specify whether the Secret or its key
+                                  must be defined
+                                type: boolean
+                            required:
+                            - key
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          searchAttribute:
+                            description: Attribute to match against the username
+                            type: string
+                          searchFilter:
+                            description: Search filter to use when doing the search+bind
+                              authentication
+                            type: string
+                        type: object
+                      port:
+                        description: LDAP server port
+                        type: integer
+                      scheme:
+                        description: LDAP scheme to be used, possible options are
+                          `ldap` and `ldaps`
+                        enum:
+                        - ldap
+                        - ldaps
+                        type: string
+                      server:
+                        description: LDAP hostname or IP address
+                        type: string
+                      tls:
+                        description: Set to 'true' to enable LDAP over TLS. Defaults
+                          to 'false'.
+                        type: boolean
+                    type: object
+                  parameters:
+                    additionalProperties:
+                      type: string
+                    description: PostgreSQL configuration options (postgresql.conf)
+                    type: object
+                  pg_hba:
+                    description: |-
+                      PostgreSQL Host Based Authentication rules (lines to be appended
+                      to the pg_hba.conf file)
+                    items:
+                      type: string
+                    type: array
+                  pg_ident:
+                    description: |-
+                      PostgreSQL User Name Maps rules (lines to be appended
+                      to the pg_ident.conf file)
+                    items:
+                      type: string
+                    type: array
+                  promotionTimeout:
+                    description: |-
+                      Specifies the maximum number of seconds to wait when promoting an instance to primary.
+                      Default value is 40000000, greater than one year in seconds,
+                      big enough to simulate an infinite timeout
+                    format: int32
+                    type: integer
+                  shared_preload_libraries:
+                    description: List of shared preload libraries to add to the default
+                      ones
+                    items:
+                      type: string
+                    type: array
+                  syncReplicaElectionConstraint:
+                    description: |-
+                      Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+                      set up.
+                    properties:
+                      enabled:
+                        description: This flag enables the constraints for sync replicas
+                        type: boolean
+                      nodeLabelsAntiAffinity:
+                        description: A list of node label values to extract and compare
+                          to evaluate if the pods reside in the same topology or not
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - enabled
+                    type: object
+                  synchronous:
+                    description: Configuration of the PostgreSQL synchronous replication
+                      feature
+                    properties:
+                      dataDurability:
+                        default: required
+                        description: |-
+                          If set to "required", data durability is strictly enforced. Write operations
+                          with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+                          block if there are insufficient healthy replicas, ensuring data persistence.
+                          If set to "preferred", data durability is maintained when healthy replicas
+                          are available, but the required number of instances will adjust dynamically
+                          if replicas become unavailable. This setting relaxes strict durability enforcement
+                          to allow for operational continuity. This setting is only applicable if both
+                          `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+                        enum:
+                        - required
+                        - preferred
+                        type: string
+                      maxStandbyNamesFromCluster:
+                        description: |-
+                          Specifies the maximum number of local cluster pods that can be
+                          automatically included in the `synchronous_standby_names` option in
+                          PostgreSQL.
+                        type: integer
+                      method:
+                        description: |-
+                          Method to select synchronous replication standbys from the listed
+                          servers, accepting 'any' (quorum-based synchronous replication) or
+                          'first' (priority-based synchronous replication) as values.
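+                        # Illustrative usage (hand-written comment, not produced by the
+                        # CRD generator): assuming a three-instance cluster, a minimal
+                        # quorum-based configuration of this section could look like:
+                        #
+                        #   postgresql:
+                        #     synchronous:
+                        #       method: any
+                        #       number: 1
+                        #       dataDurability: required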
+ enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. 
If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will
+                                start trying to rotate the token if the token is older than 80 percent of
+                                its time to live or if the token is older than 24 hours. Defaults to 1 hour
+                                and must be at least 10 minutes.
+                              format: int64
+                              type: integer
+                            path:
+                              description: |-
+                                path is the path relative to the mount point of the file to project the
+                                token into.
+                              type: string
+                          required:
+                          - path
+                          type: object
+                      type: object
+                    type: array
+                    x-kubernetes-list-type: atomic
+                type: object
+              replica:
+                description: Replica cluster configuration
+                properties:
+                  enabled:
+                    description: |-
+                      If replica mode is enabled, this cluster will be a replica of an
+                      existing cluster. A replica cluster can be created from a recovery
+                      object store or via streaming through pg_basebackup.
+                      Refer to the Replica clusters page of the documentation for more information.
+                    type: boolean
+                  minApplyDelay:
+                    description: |-
+                      When replica mode is enabled, this parameter allows you to replay
+                      transactions only when the system time is at least the configured
+                      time past the commit time. This provides an opportunity to correct
+                      data loss errors. Note that when this parameter is set, a promotion
+                      token cannot be used.
+                    type: string
+                  primary:
+                    description: |-
+                      Primary defines which Cluster is the primary in the distributed PostgreSQL cluster, based on the
+                      topology specified in externalClusters
+                    type: string
+                  promotionToken:
+                    description: |-
+                      A demotion token generated by an external cluster used to
+                      check if the promotion requirements are met.
+                    type: string
+                  self:
+                    description: |-
+                      Self defines the name of this cluster. It is used to determine if this is a primary
+                      or a replica cluster, comparing it with `primary`
+                    type: string
+                  source:
+                    description: The name of the external cluster which is the replication
+                      origin
+                    minLength: 1
+                    type: string
+                required:
+                - source
+                type: object
+              replicationSlots:
+                default:
+                  highAvailability:
+                    enabled: true
+                description: Replication slots management configuration
+                properties:
+                  highAvailability:
+                    default:
+                      enabled: true
+                    description: Replication slots for high availability configuration
+                    properties:
+                      enabled:
+                        default: true
+                        description: |-
+                          If enabled (default), the operator will automatically manage replication slots
+                          on the primary instance and use them in streaming replication
+                          connections with all the standby instances that are part of the HA
+                          cluster. If disabled, the operator will not take advantage
+                          of replication slots in streaming connections with the replicas.
+                          This feature also controls replication slots in replica clusters,
+                          from the designated primary to its cascading replicas.
+                        type: boolean
+                      slotPrefix:
+                        default: _cnpg_
+                        description: |-
+                          Prefix for replication slots managed by the operator for HA.
+                          It may only contain lower case letters, numbers, and the underscore character.
+                          This can only be set at creation time. By default set to `_cnpg_`.
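+                        # Illustrative usage (hand-written comment, not produced by the
+                        # CRD generator): a hypothetical `replicationSlots` stanza that
+                        # keeps HA slot management enabled with the default prefix and a
+                        # custom update interval:
+                        #
+                        #   replicationSlots:
+                        #     highAvailability:
+                        #       enabled: true
+                        #       slotPrefix: _cnpg_
+                        #     updateInterval: 60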
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+                          If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                          set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                          exists.
+                          More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                          (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+                        type: string
+                      volumeMode:
+                        description: |-
+                          volumeMode defines what type of volume is required by the claim.
+                          Value of Filesystem is implied when not included in claim spec.
+                        type: string
+                      volumeName:
+                        description: volumeName is the binding reference to the PersistentVolume
+                          backing this claim.
+                        type: string
+                    type: object
+                  resizeInUseVolumes:
+                    default: true
+                    description: Resize existing PVCs, defaults to true
+                    type: boolean
+                  size:
+                    description: |-
+                      Size of the storage. Required if not already specified in the PVC template.
+                      Changes to this field are automatically reapplied to the created PVCs.
+                      Size cannot be decreased.
+                    type: string
+                  storageClass:
+                    description: |-
+                      StorageClass to use for PVCs. Applied after
+                      evaluating the PVC template, if available.
+                      If not specified, the generated PVCs will use the
+                      default storage class
+                    type: string
+                type: object
+              superuserSecret:
+                description: |-
+                  The secret containing the superuser password. If not defined, a new
+                  secret will be created with a randomly generated password
+                properties:
+                  name:
+                    description: Name of the referent.
+                    type: string
+                required:
+                - name
+                type: object
+              switchoverDelay:
+                default: 3600
+                description: |-
+                  The time in seconds that is allowed for a primary PostgreSQL instance
+                  to gracefully shutdown during a switchover.
+                  Default value is 3600 seconds (1 hour).
+                format: int32
+                type: integer
+              tablespaces:
+                description: The tablespaces configuration
+                items:
+                  description: |-
+                    TablespaceConfiguration is the configuration of a tablespace, and includes
+                    the storage specification for the tablespace
+                  properties:
+                    name:
+                      description: The name of the tablespace
+                      type: string
+                    owner:
+                      description: Owner is the PostgreSQL user owning the tablespace
+                      properties:
+                        name:
+                          type: string
+                      type: object
+                    storage:
+                      description: The storage configuration for the tablespace
+                      properties:
+                        pvcTemplate:
+                          description: Template to be used to generate the Persistent
+                            Volume Claim
+                          properties:
+                            accessModes:
+                              description: |-
+                                accessModes contains the desired access modes the volume should have.
+                                More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            dataSource:
+                              description: |-
+                                dataSource field can be used to specify either:
+                                * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                                * An existing PVC (PersistentVolumeClaim)
+                                If the provisioner or an external controller can support the specified data source,
+                                it will create a new volume based on the contents of the specified data source.
+                                When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+                                and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+                                If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+                              properties:
+                                apiGroup:
+                                  description: |-
+                                    APIGroup is the group for the resource being referenced.
+                                    If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+                                If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                                set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                                exists.
+                                More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                                (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+                              type: string
+                            volumeMode:
+                              description: |-
+                                volumeMode defines what type of volume is required by the claim.
+                                Value of Filesystem is implied when not included in claim spec.
+                              type: string
+                            volumeName:
+                              description: volumeName is the binding reference to
+                                the PersistentVolume backing this claim.
+                              type: string
+                          type: object
+                        resizeInUseVolumes:
+                          default: true
+                          description: Resize existing PVCs, defaults to true
+                          type: boolean
+                        size:
+                          description: |-
+                            Size of the storage. Required if not already specified in the PVC template.
+                            Changes to this field are automatically reapplied to the created PVCs.
+                            Size cannot be decreased.
+                          type: string
+                        storageClass:
+                          description: |-
+                            StorageClass to use for PVCs. Applied after
+                            evaluating the PVC template, if available.
+                            If not specified, the generated PVCs will use the
+                            default storage class
+                          type: string
+                      type: object
+                    temporary:
+                      default: false
+                      description: |-
+                        When set to true, the tablespace will be added as a `temp_tablespaces`
+                        entry in PostgreSQL, and will be available to automatically house temp
+                        database objects, or other temporary files. Please refer to PostgreSQL
+                        documentation for more information on the `temp_tablespaces` GUC.
+                      type: boolean
+                  required:
+                  - name
+                  - storage
+                  type: object
+                type: array
+              topologySpreadConstraints:
+                description: |-
+                  TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+                  More info:
+                  https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+                items:
+                  description: TopologySpreadConstraint specifies how to spread matching
+                    pods among the given topology.
+                  properties:
+                    labelSelector:
+                      description: |-
+                        LabelSelector is used to find matching pods.
+                        Pods that match this label selector are counted to determine the number of pods
+                        in their corresponding topology domain.
+                      properties:
+                        matchExpressions:
+                          description: matchExpressions is a list of label selector
+                            requirements. The requirements are ANDed.
+                          items:
+                            description: |-
+                              A label selector requirement is a selector that contains values, a key, and an operator that
+                              relates the key and values.
+                            properties:
+                              key:
+                                description: key is the label key that the selector
+                                  applies to.
+                                type: string
+                              operator:
+                                description: |-
+                                  operator represents a key's relationship to a set of values.
+                                  Valid operators are In, NotIn, Exists and DoesNotExist.
+                                type: string
+                              values:
+                                description: |-
+                                  values is an array of string values. If the operator is In or NotIn,
+                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                  the values array must be empty. This array is replaced during a strategic
+                                  merge patch.
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            required:
+                            - key
+                            - operator
+                            type: object
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        matchLabels:
+                          additionalProperties:
+                            type: string
+                          description: |-
+                            matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
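+                      # Illustrative usage (hand-written comment, not produced by the
+                      # CRD generator): a hypothetical constraint spreading a cluster's
+                      # pods across availability zones; the `cnpg.io/cluster` selector
+                      # label and cluster name are assumptions:
+                      #
+                      #   topologySpreadConstraints:
+                      #   - maxSkew: 1
+                      #     topologyKey: topology.kubernetes.io/zone
+                      #     whenUnsatisfiable: DoNotSchedule
+                      #     labelSelector:
+                      #       matchLabels:
+                      #         cnpg.io/cluster: cluster-example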
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
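To ground the zone-skew arithmetic described above, a minimal constraint under a Cluster's `.spec` might look like this (the `cnpg.io/cluster` label value is an assumed cluster name):

  topologySpreadConstraints:
    - maxSkew: 1                              # required; 0 is not allowed
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule        # or ScheduleAnyway for best effort
      labelSelector:
        matchLabels:
          cnpg.io/cluster: cluster-example    # assumed

With three zones and `maxSkew: 1`, a 2/2/1 spread only admits new pods in the least-populated zone, as in the worked example above.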
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
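Since the schema text spells out the matchLabels/matchExpressions equivalence, here is a minimal side-by-side sketch (the label key and value are placeholders):

  selector:
    matchLabels:
      app: postgres
  # ...selects the same volumes as:
  selector:
    matchExpressions:
      - key: app
        operator: In
        values:
          - postgres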
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName; + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs. Defaults to true. + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class. + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + azurePVCUpdateEnabled: + description: AzurePVCUpdateEnabled shows if the PVC online upgrade + is enabled for this cluster + type: boolean + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults.
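Before the status schema unfolds below, a sketch tying together the `walStorage` block and the imageName/imageCatalogRef validation just shown (the image tag, sizes, and storage class are assumptions):

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  imageName: ghcr.io/cloudnative-pg/postgresql:17.2   # must not be combined with imageCatalogRef
  storage:
    size: 20Gi
  walStorage:                 # dedicated volume for pg_wal
    size: 5Gi
    storageClass: fast-ssd    # assumed class name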
+ properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; can be omitted + if ReplicationTLSSecret is provided.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server SSL certificates; can be omitted + if ServerTLSSecret is provided.
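The secrets tracked here (including the serverTLSSecret field that follows) can be supplied by the user through `.spec.certificates`; a hedged sketch with assumed secret names:

spec:
  certificates:
    serverCASecret: my-server-ca              # ca.crt required; ca.key only if serverTLSSecret is omitted
    serverTLSSecret: my-server-tls            # kubernetes.io/tls secret backing ssl_cert_file/ssl_key_file
    clientCASecret: my-client-ca              # validates client certificates (ssl_ca_file)
    replicationTLSSecret: my-replication-tls  # client certificate for the streaming_replica user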
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash of the operator that is currently running + type: string + cloudNativePGOperatorHash: + description: The hash of the operator binary + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data. + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler 
+ items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contains the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster.
+ type: boolean + type: object + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance; this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors + in synchronous replica election in case of failures. + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. 
+ type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
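Before the status fields, here is a sketch of a Database manifest exercising the spec above (the cluster, database, and owner names are assumptions):

apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  name: db-app
spec:
  cluster:
    name: cluster-example       # hosting Cluster, assumed
  name: app                     # immutable; postgres/template0/template1 are reserved
  owner: app                    # role that owns the database
  encoding: UTF8                # immutable after creation
  databaseReclaimPolicy: retain # default; use delete to drop the database with the resource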
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
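To illustrate the major-version uniqueness rule being described, a hedged ImageCatalog sketch (the image tags are assumptions):

apiVersion: postgresql.cnpg.io/v1
kind: ImageCatalog
metadata:
  name: postgresql
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16.6   # illustrative tag
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17.2   # each entry must use a distinct major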
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
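Given the surge/unavailability arithmetic spelled out here, a conservative rollout for a Pooler could be sketched as follows (the values are assumptions, not defaults):

spec:
  deploymentStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1         # one extra pgbouncer pod during the rollout
      maxUnavailable: 0   # allowed because maxSurge is non-zero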
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
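A hedged sketch of the monitoring block, dropping one assumed metric before ingestion:

spec:
  monitoring:
    enablePodMonitor: true
    podMonitorMetricRelabelings:
      - action: drop                    # discard matching samples
        sourceLabels:
          - __name__
        regex: pgbouncer_unwanted_.*    # assumed metric name pattern, for illustration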
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
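Tying the PgBouncer options together, an illustrative Pooler (the cluster name is assumed; note that the Pooler name must differ from every Cluster name in the namespace):

apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw
spec:
  cluster:
    name: cluster-example
  instances: 3
  type: rw                      # assumed; surfaced by the Type printer column above
  pgbouncer:
    poolMode: transaction       # default is session
    parameters:
      max_client_conn: "1000"   # illustrative PgBouncer parameter
    paused: false               # flip to true to invoke PAUSE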
+ type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. 
If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. 
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it cannot be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
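+ # NOTE: a minimal, illustrative sketch of how this field is typically set
+ # on a Pooler's pod template (the `app: my-app` label is hypothetical):
+ #
+ #   spec:
+ #     template:
+ #       spec:
+ #         affinity:
+ #           podAffinity:
+ #             preferredDuringSchedulingIgnoredDuringExecution:
+ #             - weight: 100
+ #               podAffinityTerm:
+ #                 topologyKey: topology.kubernetes.io/zone
+ #                 labelSelector:
+ #                   matchLabels:
+ #                     app: my-app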
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
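+ # NOTE: an illustrative sketch — matchLabelKeys is commonly combined with a
+ # labelSelector so that only pods from the same rollout are considered
+ # (the `app: my-app` label is hypothetical; `pod-template-hash` is the
+ # label Deployments set on each ReplicaSet's pods):
+ #
+ #   podAffinityTerm:
+ #     topologyKey: kubernetes.io/hostname
+ #     labelSelector:
+ #       matchLabels:
+ #         app: my-app
+ #     matchLabelKeys:
+ #     - pod-template-hash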
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
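+ # NOTE: an illustrative sketch of a hard co-location constraint (the label
+ # is hypothetical); every term listed must be satisfied at scheduling time:
+ #
+ #   affinity:
+ #     podAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #       - topologyKey: kubernetes.io/hostname
+ #         labelSelector:
+ #           matchLabels:
+ #             app: my-app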
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
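+ # NOTE: an illustrative sketch — the sleep handler simply pauses the hook
+ # for the given duration; the same shape applies to preStop, where it is
+ # commonly used to delay shutdown:
+ #
+ #   lifecycle:
+ #     preStop:
+ #       sleep:
+ #         seconds: 5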
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume.
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for + mitigating container breakout vulnerabilities while still allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod. + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
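+ # Illustrative sketch (not generator output): a volume mount conforming to
+ # this schema; the volume name and path are hypothetical, and the name must
+ # match an entry under .volumes.
+ #   volumeMounts:
+ #   - name: data
+ #     mountPath: /var/lib/postgresql/data
+ #     readOnly: false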
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. 
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
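+ # Illustrative sketch (not generator output): a restrictive pod-level
+ # securityContext conforming to this schema; the UID/GID values are
+ # hypothetical.
+ #   securityContext:
+ #     runAsNonRoot: true
+ #     runAsUser: 1000
+ #     fsGroup: 1000
+ #     seccompProfile:
+ #       type: RuntimeDefault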
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
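+ # Illustrative sketch (not generator output): with operator Exists the value
+ # stays empty; the taint key below is hypothetical.
+ #   tolerations:
+ #   - key: node-role.kubernetes.io/infra
+ #     operator: Exists
+ #     effect: NoSchedule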
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
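+ # Illustrative sketch (not generator output): a zone-spread constraint
+ # conforming to this schema; the label selector is hypothetical.
+ #   topologySpreadConstraints:
+ #   - maxSkew: 1
+ #     topologyKey: topology.kubernetes.io/zone
+ #     whenUnsatisfiable: DoNotSchedule
+ #     labelSelector:
+ #       matchLabels:
+ #         app: example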
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
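+ # Example (illustrative sketch of the init-container replacement pattern
+ # described in the gitRepo deprecation note above; the image and repository
+ # URL are placeholders, not a recommendation):
+ #
+ # initContainers:
+ # - name: clone-repo
+ #   image: alpine/git
+ #   args: ["clone", "https://example.com/repo.git", "/work"]
+ #   volumeMounts:
+ #   - name: work
+ #     mountPath: /work
+ # volumes:
+ # - name: work
+ #   emptyDir: {}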
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects.
Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal.
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
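+ # Example (illustrative sketch): mounting an existing claim by name; the
+ # claim name "pooler-cache" is an assumption made for the example:
+ #
+ # volumes:
+ # - name: cache
+ #   persistentVolumeClaim:
+ #     claimName: pooler-cache
+ #     readOnly: false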
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
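+ # Example (illustrative sketch): one projected volume combining a ConfigMap
+ # key with a downward-API file; the ConfigMap name and key are assumptions
+ # made for the example:
+ #
+ # volumes:
+ # - name: bundle
+ #   projected:
+ #     sources:
+ #     - configMap:
+ #         name: app-config
+ #         items:
+ #         - key: settings.ini
+ #           path: settings.ini
+ #     - downwardAPI:
+ #         items:
+ #         - path: labels
+ #           fieldRef:
+ #             fieldPath: metadata.labels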
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specifying whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted.
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + the vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
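+# Illustrative sketch (comment only, not part of the generated manifests): a
+# minimal Pooler resource using the schema above, pointing PgBouncer at the
+# rw service of a cluster. The cluster name and pool mode are hypothetical.
+#
+# apiVersion: postgresql.cnpg.io/v1
+# kind: Pooler
+# metadata:
+#   name: pooler-example-rw
+# spec:
+#   cluster:
+#     name: cluster-example
+#   instances: 1
+#   type: rw
+#   pgbouncer:
+#     poolMode: session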
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
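+# Illustrative sketch (comment only, not part of the generated manifests): a
+# Publication matching the schema above, publishing all tables of a
+# hypothetical "app" database in a hypothetical "cluster-example" cluster.
+#
+# apiVersion: postgresql.cnpg.io/v1
+# kind: Publication
+# metadata:
+#   name: publication-example
+# spec:
+#   cluster:
+#     name: cluster-example
+#   dbname: app
+#   name: pub
+#   target:
+#     allTables: true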
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: set the cluster as owner of the backup
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: Whether the first backup should start immediately after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: Whether this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup.
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule was checked + format: date-time + type: string + lastScheduleTime: + description: Information about the last time that a backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: The next time a backup will be run + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong.
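+# Illustrative sketch (comment only, not part of the generated manifests): a
+# ScheduledBackup using the six-field cron format described above (note the
+# leading seconds specifier); the names are hypothetical.
+#
+# apiVersion: postgresql.cnpg.io/v1
+# kind: ScheduledBackup
+# metadata:
+#   name: backup-example
+# spec:
+#   schedule: "0 0 0 * * *"
+#   backupOwnerReference: self
+#   cluster:
+#     name: cluster-example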
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the subscription will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" +
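+# Illustrative sketch (comment only, not part of the generated manifests): a
+# Subscription matching the schema above, consuming a publication named "pub"
+# through a hypothetical external cluster entry "cluster-pub".
+#
+# apiVersion: postgresql.cnpg.io/v1
+# kind: Subscription
+# metadata:
+#   name: subscription-example
+# spec:
+#   cluster:
+#     name: cluster-dest
+#   dbname: app
+#   name: sub
+#   externalClusterName: cluster-pub
+#   publicationName: pub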
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: 
ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: 
/validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.25.1.yaml b/releases/cnpg-1.25.1.yaml new file mode 100644 index 0000000000..58439bc983 --- /dev/null +++ b/releases/cnpg-1.25.1.yaml @@ -0,0 +1,17791 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: Backup is the Schema for the backups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. 
+ enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without explicitly + providing the keys. + type: boolean + storageAccount: + description: The storage account where data is uploaded + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
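+# Illustrative sketch (comment only, not part of the generated manifests): an
+# on-demand Backup of a hypothetical cluster using the default
+# barmanObjectStore method described above.
+#
+# apiVersion: postgresql.cnpg.io/v1
+# kind: Backup
+# metadata:
+#   name: backup-example
+# spec:
+#   method: barmanObjectStore
+#   cluster:
+#     name: cluster-example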
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where the backup is stored (i.e. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required by the S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, it is presumed that it's running inside a GKE environment; + defaults to false.
+ type: boolean
+ type: object
+ instanceID:
+ description: Information to identify the instance where the backup
+ has been taken from
+ properties:
+ ContainerID:
+ description: The container ID
+ type: string
+ podName:
+ description: The pod name
+ type: string
+ type: object
+ method:
+ description: The backup method being used
+ type: string
+ online:
+ description: Whether the backup was online/hot (`true`) or offline/cold
+ (`false`)
+ type: boolean
+ phase:
+ description: The last backup status
+ type: string
+ pluginMetadata:
+ additionalProperties:
+ type: string
+ description: A map containing the plugin metadata
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without explicitly
+ providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the region
+ name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ snapshotBackupStatus:
+ description: Status of the volumeSnapshot backup
+ properties:
+ elements:
+ description: The elements list, populated with the gathered volume
+ snapshots
+ items:
+ description: BackupSnapshotElementStatus is a volume snapshot
+ that is part of a volume snapshot method backup
+ properties:
+ name:
+ description: Name is the snapshot resource name
+ type: string
+ tablespaceName:
+ description: |-
+ TablespaceName is the name of the snapshotted tablespace. Only set
+ when type is PG_TABLESPACE
+ type: string
+ type:
+ description: Type is the role of the snapshot in the cluster,
+ such as PG_DATA, PG_WAL and PG_TABLESPACE
+ type: string
+ required:
+ - name
+ - type
+ type: object
+ type: array
+ type: object
+ startedAt:
+ description: When the backup was started
+ format: date-time
+ type: string
+ stoppedAt:
+ description: When the backup was terminated
+ format: date-time
+ type: string
+ tablespaceMapFile:
+ description: Tablespace map file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.2
+ name: clusterimagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ClusterImageCatalog
+ listKind: ClusterImageCatalogList
+ plural: clusterimagecatalogs
+ singular: clusterimagecatalog
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ClusterImageCatalog is the Schema for the clusterimagecatalogs
+ API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ClusterImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the
+ corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a map of key-value pairs used to define the nodes on which
+ the pods can run.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See k8s documentation
+ for more info on that
+ type: string
+ type: object
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to backup the data files.
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment;
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key-value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key-value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (e.g. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are empty string, which will default to the `prefer-standby` policy,
+ `primary` to have backups run always on primary instances, `prefer-standby`
+ to have backups run preferably on the most updated standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations are key-value pairs that will be added
+ to the .metadata.annotations of snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels are key-value pairs that will be added
+ to the .metadata.labels of snapshot resources.
+ type: object
+ online:
+ default: true
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`).
+ type: boolean
+ onlineConfiguration:
+ default:
+ immediateCheckpoint: false
+ waitForArchive: true
+ description: Configuration parameters to control the online/hot
+ backup with volume snapshots
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ snapshotOwnerReference:
+ default: none
+ description: SnapshotOwnerReference indicates the type of
+ owner reference the snapshot should have
+ enum:
+ - none
+ - cluster
+ - backup
+ type: string
+ tablespaceClassName:
+ additionalProperties:
+ type: string
+ description: |-
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+ Defaults to the PGDATA Snapshot Class, if set.
+ type: object
+ walClassName:
+ description: WalClassName specifies the Snapshot Class to
+ be used for the PG_WAL PersistentVolumeClaim.
+ type: string
+ type: object
+ type: object
+ bootstrap:
+ description: Instructions to bootstrap this cluster
+ properties:
+ initdb:
+ description: Bootstrap the cluster via initdb
+ properties:
+ builtinLocale:
+ description: |-
+ Specifies the locale name when the builtin provider is used.
+ This option requires `localeProvider` to be set to `builtin`.
+ Available from PostgreSQL 17.
+ type: string
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default:`UTF8`)
+ type: string
+ icuLocale:
+ description: |-
+ Specifies the ICU locale when the ICU provider is used.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
+ type: string
+ icuRules:
+ description: |-
+ Specifies additional collation rules to customize the behavior of the default collation.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 16.
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ pgDumpExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ pgRestoreExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
+ locale:
+ description: Sets the default collation order and character
+ classification in the new database.
+ type: string
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default:`C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default:`C`)
+ type: string
+ localeProvider:
+ description: |-
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations,
+ please use the explicitly provided parameters instead.
+ If defined, explicit values will be ignored.
+ items:
+ type: string
+ type: array
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ postInitApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitApplicationSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the application database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
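# For illustration only: a hypothetical `bootstrap.initdb` excerpt using the
# SQL-reference fields above; Secrets are processed before ConfigMaps, and the
# Secret/ConfigMap names and keys are made up:
#
#   spec:
#     bootstrap:
#       initdb:
#         database: app
#         owner: app
#         postInitApplicationSQLRefs:
#           secretRefs:
#             - name: app-schema-secret
#               key: schema.sql
#           configMapRefs:
#             - name: app-seed-configmap
#               key: seed.sql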
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also
+ end the recovery as soon as a consistent state is reached or
+ recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object,
+ as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...).
+ More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET
+ properties:
+ backupID:
+ description: |-
+ The ID of the backup from which to start the recovery process.
+ If empty (default), the operator will automatically detect the backup
+ based on targetTime or targetLSN if specified. Otherwise, use the
+ latest available backup in chronological order.
+ type: string
+ exclusive:
+ description: |-
+ Set the target to be exclusive. If omitted, defaults to false, so that
+ in Postgres, `recovery_target_inclusive` will be true
+ type: boolean
+ targetImmediate:
+ description: End recovery as soon as a consistent state
+ is reached
+ type: boolean
+ targetLSN:
+ description: The target LSN (Log Sequence Number)
+ type: string
+ targetName:
+ description: |-
+ The target name (to be previously created
+ with `pg_create_restore_point`)
+ type: string
+ targetTLI:
+ description: The target timeline ("latest" or a positive
+ integer)
+ type: string
+ targetTime:
+ description: The target time as a timestamp in the RFC3339
+ standard
+ type: string
+ targetXID:
+ description: The target transaction ID
+ type: string
+ type: object
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+ description: |-
+ The external cluster whose backup we will restore. This is also
+ used as the name of the folder under which the backup is stored,
+ so it must be set to the name of the source cluster.
+ Mutually exclusive with `backup`.
+ type: string
+ volumeSnapshots:
+ description: |-
+ The static PVC data source(s) from which to initiate the
+ recovery procedure. Currently supporting `VolumeSnapshot`
+ and `PersistentVolumeClaim` resources that map an existing
+ PVC group, compatible with CloudNativePG, and taken with
+ a cold backup copy on a fenced Postgres instance (a limitation
+ that will be removed in the future when online backup
+ is implemented).
+ Mutually exclusive with `backup`.
+ properties:
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ tablespaceStorage:
+ additionalProperties:
+ description: |-
+ TypedLocalObjectReference contains enough information to let you locate the
+ typed referenced object inside the same namespace.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
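# For illustration only: a hypothetical point-in-time recovery (PITR) bootstrap
# using the `recoveryTarget` fields above; the external cluster name, bucket
# path and timestamp are made up:
#
#   spec:
#     bootstrap:
#       recovery:
#         source: origin-cluster
#         recoveryTarget:
#           targetTime: "2024-05-01T10:00:00Z"
#     externalClusters:
#       - name: origin-cluster
#         barmanObjectStore:
#           destinationPath: s3://example-backups/origin-cluster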
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
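# For illustration only: a hypothetical `spec.certificates` stanza wiring the
# user-provided CA and TLS secrets described above (secret names are made up):
#
#   spec:
#     certificates:
#       serverCASecret: server-ca            # ca.crt (plus ca.key if serverTLSSecret is omitted)
#       serverTLSSecret: server-tls          # a kubernetes.io/tls secret
#       clientCASecret: client-ca
#       replicationTLSSecret: replication-tls
#       serverAltDNSNames:
#         - db.example.com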
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must also provide `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ description:
+ description: Description of this PostgreSQL cluster
+ type: string
+ enablePDB:
+ default: true
+ description: |-
+ Manage the `PodDisruptionBudget` resources within the cluster. When
+ configured as `true` (default setting), the pod disruption budgets
+ will safeguard the primary node from being terminated. Conversely,
+ setting it to `false` will result in the absence of any
+ `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ hosting the PostgreSQL cluster. This latter configuration is
+ advisable for any PostgreSQL cluster employed for
+ development/staging purposes.
+ type: boolean
+ enableSuperuserAccess:
+ default: false
+ description: |-
+ When this option is enabled, the operator will use the `SuperuserSecret`
+ to update the `postgres` user password (if the secret is
+ not present, the operator will automatically create one). When this
+ option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+ it when automatically created, and then blank the password of the `postgres`
+ user by setting it to `NULL`. Disabled by default.
+ type: boolean
+ env:
+ description: |-
+ Env follows the Env format to pass environment variables
+ to the pods created in the cluster
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
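# For illustration only: a hypothetical excerpt limiting the ephemeral volumes
# defined by `spec.ephemeralVolumesSizeLimit` above (sizes are arbitrary):
#
#   spec:
#     ephemeralVolumesSizeLimit:
#       shm: 256Mi
#       temporaryData: 1Gi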
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ providing explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslRootCert:
+ description: |-
+ The reference to an SSL CA public key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - name
+ type: object
+ type: array
+ failoverDelay:
+ default: 0
+ description: |-
+ The amount of time (in seconds) to wait before triggering a failover
+ after the primary PostgreSQL instance in the cluster was detected
+ to be unhealthy
+ format: int32
+ type: integer
+ imageCatalogRef:
+ description: Defines the major PostgreSQL version we want to use within
+ an ImageCatalog
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ major:
+ description: The major version of PostgreSQL we want to use from
+ the ImageCatalog
+ type: integer
+ x-kubernetes-validations:
+ - message: Major is immutable
+ rule: self == oldSelf
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - major
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: Only image catalogs are supported
+ rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+ - message: Only image catalogs are supported
+ rule: self.apiGroup == 'postgresql.cnpg.io'
+ imageName:
+ description: |-
+ Name of the container image, supporting both tags (`<image>:<tag>`)
+ and digests for deterministic and repeatable deployments
+ (`<image>:<tag>@sha256:<digestValue>`)
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of `Always`, `Never` or `IfNotPresent`.
+ If not defined, it defaults to `IfNotPresent`.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ imagePullSecrets:
+ description: The list of pull secrets to be used to pull the images
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate a
+ local object with a known type inside the same namespace
+ properties:
+ name:
+ description: Name of the referent.
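# For illustration only: a hypothetical `spec.imageCatalogRef` selecting a
# PostgreSQL major version from a catalog (the catalog name is made up);
# note that `major` is immutable once set:
#
#   spec:
#     imageCatalogRef:
#       apiGroup: postgresql.cnpg.io
#       kind: ClusterImageCatalog
#       name: postgresql
#       major: 17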
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ inheritedMetadata:
+ description: Metadata that will be inherited by all objects related
+ to the Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ instances:
+ default: 1
+ description: Number of instances required in the cluster
+ minimum: 1
+ type: integer
+ livenessProbeTimeout:
+ description: |-
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ to successfully respond to the liveness probe (default 30).
+ The Liveness probe failure threshold is derived from this value using the formula:
+ ceiling(livenessProbeTimeout / 10).
+ format: int32
+ type: integer
+ logLevel:
+ default: info
+ description: 'The instances'' log level, one of the following values:
+ error, warning, info (default), debug, trace'
+ enum:
+ - error
+ - warning
+ - info
+ - debug
+ - trace
+ type: string
+ managed:
+ description: The configuration that is used by the portions of PostgreSQL
+ that are managed by the instance manager
+ properties:
+ roles:
+ description: Database roles managed by the `Cluster`
+ items:
+ description: |-
+ RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+ with the additional field Ensure specifying whether to ensure the presence or
+ absence of the role in the database
+
+ The defaults of the CREATE ROLE command are applied
+ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+ properties:
+ bypassrls:
+ description: |-
+ Whether a role bypasses every row-level security (RLS) policy.
+ Default is `false`.
+ type: boolean
+ comment:
+ description: Description of the role
+ type: string
+ connectionLimit:
+ default: -1
+ description: |-
+ If the role can log in, this specifies how many concurrent
+ connections the role can make. `-1` (the default) means no limit.
+ format: int64
+ type: integer
+ createdb:
+ description: |-
+ When set to `true`, the role being defined will be allowed to create
+ new databases. Specifying `false` (default) will deny a role the
+ ability to create databases.
+ type: boolean
+ createrole:
+ description: |-
+ Whether the role will be permitted to create, alter, drop, comment
+ on, change the security label for, and grant or revoke membership in
+ other roles. Default is `false`.
+ type: boolean
+ disablePassword:
+ description: DisablePassword indicates that a role's password
+ should be set to NULL in Postgres
+ type: boolean
+ ensure:
+ default: present
+ description: Ensure the role is `present` or `absent` -
+ defaults to "present"
+ enum:
+ - present
+ - absent
+ type: string
+ inRoles:
+ description: |-
+ List of one or more existing roles to which this role will be
+ immediately added as a new member. Default empty.
+ items:
+ type: string
+ type: array
+ inherit:
+ default: true
+ description: |-
+ Whether a role "inherits" the privileges of roles it is a member of.
+ Default is `true`.
+ type: boolean
+ login:
+ description: |-
+ Whether the role is allowed to log in. A role having the `login`
+ attribute can be thought of as a user. Roles without this attribute
+ are useful for managing database privileges, but are not users in
+ the usual sense of the word. Default is `false`.
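# For illustration only: a hypothetical declarative role managed through
# `spec.managed.roles` using the fields above (role and secret names are
# made up):
#
#   spec:
#     managed:
#       roles:
#         - name: app_reader
#           ensure: present
#           login: true
#           inRoles:
#             - pg_read_all_data
#           passwordSecret:
#             name: app-reader-password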
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present).
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+ superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ services:
+ description: Services managed by the `Cluster`
+ properties:
+ additional:
+ description: Additional is a list of additional managed services
+ specified by the user.
+ items:
+ description: |-
+ ManagedService represents a specific service managed by the cluster.
+ It includes the type of service and its associated template specification.
+ properties:
+ selectorType:
+ description: |-
+ SelectorType specifies the type of selectors that the service will have.
+ Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ serviceTemplate:
+ description: ServiceTemplate is the template specification
+ for the service.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only
+ supported for certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true".
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as described above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as described above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature. + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port.
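+# A minimal sketch of an additional managed service built from this template
+# (illustrative only; the service name and LoadBalancer type are hypothetical
+# choices, not defaults):
+#
+#   managed:
+#     services:
+#       additional:
+#         - selectorType: rw
+#           updateStrategy: patch
+#           serviceTemplate:
+#             metadata:
+#               name: pg-rw-external
+#             spec:
+#               type: LoadBalancer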
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800 (for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
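+# A minimal sketch of a metric relabeling applied to samples before ingestion
+# (illustrative only; the label name `app_session_id` is hypothetical):
+#
+#   monitoring:
+#     enablePodMonitor: true
+#     podMonitorMetricRelabelings:
+#       - action: labeldrop
+#         regex: app_session_id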
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing the tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that needs to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty.
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP scheme to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: List of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node label values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + default: required + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL.
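+# A minimal sketch of quorum-based synchronous replication with relaxed
+# durability (illustrative only; values are hypothetical, not defaults):
+#
+#   postgresql:
+#     synchronous:
+#       method: any
+#       number: 1
+#       dataDurability: preferred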
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod. If the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
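+# A minimal sketch of overriding the injected liveness probe timings, following
+# the standard Kubernetes probe semantics documented above (illustrative only;
+# values are hypothetical, not defaults):
+#
+#   probes:
+#     liveness:
+#       periodSeconds: 10
+#       timeoutSeconds: 5
+#       failureThreshold: 6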
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. 
+ format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes; projected volumes will be mounted + under the `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time.
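+# A minimal sketch of projecting a Secret (one of several projection sources)
+# under the `/projected` base folder of every instance pod (illustrative only;
+# the secret name and paths are hypothetical):
+#
+#   projectedVolumeTemplate:
+#     sources:
+#       - secret:
+#           name: app-extra-config
+#           items:
+#             - key: ca.crt
+#               path: certs/ca.crt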
+ properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. A replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica clusters, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`.
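+# A minimal sketch of HA slot management combined with synchronization of
+# user-defined physical replication slots (illustrative only; the exclude
+# pattern is a hypothetical example):
+#
+#   replicationSlots:
+#     updateInterval: 30
+#     highAvailability:
+#       enabled: true
+#       slotPrefix: _cnpg_
+#     synchronizeReplicas:
+#       enabled: true
+#       excludePatterns:
+#         - "^temp_"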
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
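+# Worked example: with the defaults above, the window reserved for a fast
+# shutdown is stopDelay - smartShutdownTimeout = 1800 - 180 = 1620 seconds,
+# and startDelay = 3600 maps to a startup probe failure threshold of
+# ceiling(3600 / 10) = 360. A tighter, hypothetical tuning might be:
+#
+#   spec:
+#     startDelay: 600
+#     stopDelay: 300
+#     smartShutdownTimeout: 60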
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
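+# Illustrative sketch (not part of the generated schema): `dataSource` and
+# `dataSourceRef` are the core PersistentVolumeClaim fields, so a pvcTemplate
+# could seed new volumes from a VolumeSnapshot; the snapshot name below is
+# hypothetical:
+#
+#   spec:
+#     storage:
+#       pvcTemplate:
+#         dataSourceRef:
+#           apiGroup: snapshot.storage.k8s.io
+#           kind: VolumeSnapshot
+#           name: cluster-example-snapshot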
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined, a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shut down during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group.
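+# Illustrative sketch (not part of the generated schema): in the common case
+# only `size` (and optionally `storageClass`) is needed; `size` may be
+# increased later but never decreased. The storage class name below is
+# hypothetical:
+#
+#   spec:
+#     storage:
+#       size: 20Gi
+#       storageClass: fast-ssd
+#       resizeInUseVolumes: true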
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs.
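+# Illustrative sketch (not part of the generated schema): a `tablespaces`
+# list combining a regular and a temporary tablespace; all names and sizes
+# are hypothetical:
+#
+#   spec:
+#     tablespaces:
+#       - name: reporting
+#         owner:
+#           name: app
+#         storage:
+#           size: 10Gi
+#       - name: scratch
+#         temporary: true
+#         storage:
+#           size: 5Gi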
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
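+# Illustrative sketch (not part of the generated schema): applying the
+# maxSkew example above, instances can be spread one per zone; the
+# `cnpg.io/cluster` label and the cluster name are assumptions about how the
+# generated pods are labelled:
+#
+#   spec:
+#     topologySpreadConstraints:
+#       - maxSkew: 1
+#         topologyKey: topology.kubernetes.io/zone
+#         whenUnsatisfiable: DoNotSchedule
+#         labelSelector:
+#           matchLabels:
+#             cnpg.io/cluster: cluster-example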
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
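+# Illustrative sketch (not part of the generated schema): `walStorage`
+# mirrors the `storage` section but dedicates a separate volume to pg_wal,
+# isolating WAL I/O from data I/O; sizes are hypothetical:
+#
+#   spec:
+#     storage:
+#       size: 20Gi
+#     walStorage:
+#       size: 5Gi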
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + azurePVCUpdateEnabled: + description: AzurePVCUpdateEnabled shows if the PVC online upgrade + is enabled for this cluster + type: boolean + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults.
+ properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
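+# Illustrative sketch (not part of the generated schema): when the operator
+# generates the CA and certificates itself, the reported status typically
+# points at operator-created secrets; the names below assume the usual
+# `<cluster>-ca` / `<cluster>-server` / `<cluster>-replication` convention:
+#
+#   status:
+#     certificates:
+#       clientCASecret: cluster-example-ca
+#       serverCASecret: cluster-example-ca
+#       serverTLSSecret: cluster-example-server
+#       replicationTLSSecret: cluster-example-replication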
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash of the operator build that is running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy. + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs that are neither dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods.
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler 
+ items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. 
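+# Illustrative sketch (not part of the generated schema): a healthy
+# three-instance cluster might report a status excerpt along these lines
+# (all values are assumed):
+#
+#   status:
+#     phase: Cluster in healthy state
+#     instances: 3
+#     readyInstances: 3
+#     currentPrimary: cluster-example-1
+#     targetPrimary: cluster-example-1
+#     timelineID: 1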
+ type: boolean + type: object + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance; this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared-nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. 
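
To make these fields concrete, here is a sketch of a Database manifest that uses the ICU locale options defined above, together with the name and owner fields that follow (all names are illustrative):

apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  name: app-db                # illustrative object name
spec:
  cluster:
    name: cluster-example     # must point at an existing Cluster
  name: app                   # the PostgreSQL database name (immutable)
  owner: app                  # role owning the database
  ensure: present
  encoding: UTF8
  localeProvider: icu         # required by icuLocale, per the rule stated above
  icuLocale: en-US
  connectionLimit: -1         # -1 = no limit (the default)
  databaseReclaimPolicy: retain
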
+ type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
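
As a concrete illustration of the catalog schema, an ImageCatalog with two entries might look like this (the image tags are placeholders); each entry must carry a distinct major version:

apiVersion: postgresql.cnpg.io/v1
kind: ImageCatalog
metadata:
  name: postgresql-catalog    # illustrative
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16.6   # placeholder tag
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17.2   # placeholder tag
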
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
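
A sketch of a Pooler tying the fields above together — replica count, rollout strategy, and a PodMonitor with a single metric relabeling — assuming a minimal pgbouncer stanza (the dropped metric name is hypothetical):

apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw     # must not clash with any Cluster name in the namespace
spec:
  cluster:
    name: cluster-example
  instances: 2                # default is 1
  deploymentStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0       # keep full capacity during rollouts
  monitoring:
    enablePodMonitor: true
    podMonitorMetricRelabelings:
      - action: drop
        sourceLabels: ["__name__"]
        regex: cnpg_pgbouncer_noisy_.*   # hypothetical metric family to discard
  pgbouncer: {}               # assumed minimal stanza; the PgBouncer fields follow below
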
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
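
The PgBouncer stanza itself might then be filled in as follows; parameters is a free-form string map handed to PgBouncer, and paused drives the PAUSE/RESUME behaviour described above (values are illustrative):

pgbouncer:
  parameters:
    max_client_conn: "1000"        # values must be strings
    default_pool_size: "10"
  paused: false                    # set to true to PAUSE, back to false to RESUME
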
+              type: boolean
+            pg_hba:
+              description: |-
+                PostgreSQL Host Based Authentication rules (lines to be appended
+                to the pg_hba.conf file)
+              items:
+                type: string
+              type: array
+            poolMode:
+              default: session
+              description: 'The pool mode. Default: `session`.'
+              enum:
+              - session
+              - transaction
+              type: string
+          type: object
+        serviceTemplate:
+          description: Template for the Service to be created
+          properties:
+            metadata:
+              description: |-
+                Standard object's metadata.
+                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+              properties:
+                annotations:
+                  additionalProperties:
+                    type: string
+                  description: |-
+                    Annotations is an unstructured key value map stored with a resource that may be
+                    set by external tools to store and retrieve arbitrary metadata. They are not
+                    queryable and should be preserved when modifying objects.
+                    More info: http://kubernetes.io/docs/user-guide/annotations
+                  type: object
+                labels:
+                  additionalProperties:
+                    type: string
+                  description: |-
+                    Map of string keys and values that can be used to organize and categorize
+                    (scope and select) objects. May match selectors of replication controllers
+                    and services.
+                    More info: http://kubernetes.io/docs/user-guide/labels
+                  type: object
+                name:
+                  description: The name of the resource. Only supported for
+                    certain types
+                  type: string
+              type: object
+            spec:
+              description: |-
+                Specification of the desired behavior of the service.
+                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+              properties:
+                allocateLoadBalancerNodePorts:
+                  description: |-
+                    allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                    allocated for services with type LoadBalancer. Default is "true". It
+                    may be set to "false" if the cluster load-balancer does not rely on
+                    NodePorts. If the caller requests specific NodePorts (by specifying a
+                    value), those requests will be respected, regardless of this field.
+                    This field may only be set for services with type LoadBalancer and will
+                    be cleared if the type is changed to any other type.
+                  type: boolean
+                clusterIP:
+                  description: |-
+                    clusterIP is the IP address of the service and is usually assigned
+                    randomly. If an address is specified manually, is in-range (as per
+                    system configuration), and is not in use, it will be allocated to the
+                    service; otherwise creation of the service will fail. This field may not
+                    be changed through updates unless the type field is also being changed
+                    to ExternalName (which requires this field to be blank) or the type
+                    field is being changed from ExternalName (in which case this field may
+                    optionally be specified, as described above). Valid values are "None",
+                    empty string (""), or a valid IP address. Setting this to "None" makes a
+                    "headless service" (no virtual IP), which is useful when direct endpoint
+                    connections are preferred and proxying is not required. Only applies to
+                    types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                    when creating a Service of type ExternalName, creation will fail. This
+                    field will be wiped when updating a Service to type ExternalName.
+                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                  type: string
+                clusterIPs:
+                  description: |-
+                    ClusterIPs is a list of IP addresses assigned to this service, and are
+                    usually assigned randomly. 
If an address is specified manually, is
+                    in-range (as per system configuration), and is not in use, it will be
+                    allocated to the service; otherwise creation of the service will fail.
+                    This field may not be changed through updates unless the type field is
+                    also being changed to ExternalName (which requires this field to be
+                    empty) or the type field is being changed from ExternalName (in which
+                    case this field may optionally be specified, as described above). Valid
+                    values are "None", empty string (""), or a valid IP address. Setting
+                    this to "None" makes a "headless service" (no virtual IP), which is
+                    useful when direct endpoint connections are preferred and proxying is
+                    not required. Only applies to types ClusterIP, NodePort, and
+                    LoadBalancer. If this field is specified when creating a Service of type
+                    ExternalName, creation will fail. This field will be wiped when updating
+                    a Service to type ExternalName. If this field is not specified, it will
+                    be initialized from the clusterIP field. If this field is specified,
+                    clients must ensure that clusterIPs[0] and clusterIP have the same
+                    value.
+
+                    This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                    These IPs must correspond to the values of the ipFamilies field. Both
+                    clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                  items:
+                    type: string
+                  type: array
+                  x-kubernetes-list-type: atomic
+                externalIPs:
+                  description: |-
+                    externalIPs is a list of IP addresses for which nodes in the cluster
+                    will also accept traffic for this service. These IPs are not managed by
+                    Kubernetes. The user is responsible for ensuring that traffic arrives
+                    at a node with this IP. A common example is external load-balancers
+                    that are not part of the Kubernetes system.
+                  items:
+                    type: string
+                  type: array
+                  x-kubernetes-list-type: atomic
+                externalName:
+                  description: |-
+                    externalName is the external reference that discovery mechanisms will
+                    return as an alias for this service (e.g. a DNS CNAME record). No
+                    proxying will be involved. Must be a lowercase RFC-1123 hostname
+                    (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+                  type: string
+                externalTrafficPolicy:
+                  description: |-
+                    externalTrafficPolicy describes how nodes distribute service traffic they
+                    receive on one of the Service's "externally-facing" addresses (NodePorts,
+                    ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+                    the service in a way that assumes that external load balancers will take care
+                    of balancing the service traffic between nodes, and so each node will deliver
+                    traffic only to the node-local endpoints of the service, without masquerading
+                    the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+                    be dropped.) The default value, "Cluster", uses the standard behavior of
+                    routing to all endpoints evenly (possibly modified by topology and other
+                    features). Note that traffic sent to an External IP or LoadBalancer IP from
+                    within the cluster will always get "Cluster" semantics, but clients sending to
+                    a NodePort from within the cluster may need to take traffic policy into account
+                    when picking a node.
+                  type: string
+                healthCheckNodePort:
+                  description: |-
+                    healthCheckNodePort specifies the healthcheck nodePort for the service.
+                    This only applies when type is set to LoadBalancer and
+                    externalTrafficPolicy is set to Local. 
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+                    implementation is watching for Services with a matching class. Any default load balancer
+                    implementation (e.g. cloud providers) should ignore Services that set this field.
+                    This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                    Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                  type: string
+                loadBalancerIP:
+                  description: |-
+                    Only applies to Service Type: LoadBalancer.
+                    This feature depends on whether the underlying cloud-provider supports specifying
+                    the loadBalancerIP when a load balancer is created.
+                    This field will be ignored if the cloud-provider does not support the feature.
+                    Deprecated: This field was under-specified and its meaning varies across implementations.
+                    Using it is non-portable and it may not support dual-stack.
+                    Users are encouraged to use implementation-specific annotations when available.
+                  type: string
+                loadBalancerSourceRanges:
+                  description: |-
+                    If specified and supported by the platform, traffic through the cloud-provider
+                    load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                    cloud-provider does not support the feature.
+                    More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                  items:
+                    type: string
+                  type: array
+                  x-kubernetes-list-type: atomic
+                ports:
+                  description: |-
+                    The list of ports that are exposed by this service.
+                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                  items:
+                    description: ServicePort contains information on service's
+                      port.
+                    properties:
+                      appProtocol:
+                        description: |-
+                          The application protocol for this port.
+                          This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                          This field follows standard Kubernetes label syntax.
+                          Valid values are either:
+
+                          * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                          RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                          * Kubernetes-defined prefixed names:
+                            * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                            * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                            * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                          * Other protocols should use implementation-defined prefixed names such as
+                          mycompany.com/my-custom-protocol.
+                        type: string
+                      name:
+                        description: |-
+                          The name of this port within the service. This must be a DNS_LABEL.
+                          All ports within a ServiceSpec must have unique names. When considering
+                          the endpoints for a Service, this must match the 'name' field in the
+                          EndpointPort.
+                          Optional if only one ServicePort is defined on this service.
+                        type: string
+                      nodePort:
+                        description: |-
+                          The port on each node on which this service is exposed when type is
+                          NodePort or LoadBalancer. Usually assigned by the system. If a value is
+                          specified, in-range, and not in use it will be used, otherwise the
+                          operation will fail. If not specified, a port will be allocated if this
+                          Service requires one. If this field is specified when creating a
+                          Service which does not need it, creation will fail. This field will be
+                          wiped when updating a Service to no longer need it (e.g. 
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+                          for each node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling affinity expressions, etc.),
+                          compute a sum by iterating through the elements of this field and adding
+                          "weight" to the sum if the node matches the corresponding matchExpressions; the
+                          node(s) with the highest sum are the most preferred.
+                        items:
+                          description: |-
+                            An empty preferred scheduling term matches all objects with implicit weight 0
+                            (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                          properties:
+                            preference:
+                              description: A node selector term, associated
+                                with the corresponding weight.
+                              properties:
+                                matchExpressions:
+                                  description: A list of node selector requirements
+                                    by node's labels.
+                                  items:
+                                    description: |-
+                                      A node selector requirement is a selector that contains values, a key, and an operator
+                                      that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          An array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. If the operator is Gt or Lt, the values
+                                          array must have a single element, which will be interpreted as an integer.
+                                          This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                matchFields:
+                                  description: A list of node selector requirements
+                                    by node's fields.
+                                  items:
+                                    description: |-
+                                      A node selector requirement is a selector that contains values, a key, and an operator
+                                      that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          An array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. If the operator is Gt or Lt, the values
+                                          array must have a single element, which will be interpreted as an integer.
+                                          This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            weight:
+                              description: Weight associated with matching
+                                the corresponding nodeSelectorTerm, in the
+                                range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - preference
+                          - weight
+                          type: object
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: |-
+                          If the affinity requirements specified by this field are not met at
+                          scheduling time, the pod will not be scheduled onto the node.
+                          If the affinity requirements specified by this field cease to be met
+                          at some point during pod execution (e.g. due to an update), the system
+                          may or may not try to eventually evict the pod from its node.
+                        properties:
+                          nodeSelectorTerms:
+                            description: Required. 
A list of node selector
+                              terms. The terms are ORed.
+                            items:
+                              description: |-
+                                A null or empty node selector term matches no objects. The requirements of
+                                them are ANDed.
+                                The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                              properties:
+                                matchExpressions:
+                                  description: A list of node selector requirements
+                                    by node's labels.
+                                  items:
+                                    description: |-
+                                      A node selector requirement is a selector that contains values, a key, and an operator
+                                      that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          An array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. If the operator is Gt or Lt, the values
+                                          array must have a single element, which will be interpreted as an integer.
+                                          This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                matchFields:
+                                  description: A list of node selector requirements
+                                    by node's fields.
+                                  items:
+                                    description: |-
+                                      A node selector requirement is a selector that contains values, a key, and an operator
+                                      that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          An array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. If the operator is Gt or Lt, the values
+                                          array must have a single element, which will be interpreted as an integer.
+                                          This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            type: array
+                            x-kubernetes-list-type: atomic
+                        required:
+                        - nodeSelectorTerms
+                        type: object
+                        x-kubernetes-map-type: atomic
+                    type: object
+                  podAffinity:
+                    description: Describes pod affinity scheduling rules (e.g.
+                      co-locate this pod in the same node, zone, etc. as some
+                      other pod(s)).
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: |-
+                          The scheduler will prefer to schedule pods to nodes that satisfy
+                          the affinity expressions specified by this field, but it may choose
+                          a node that violates one or more of the expressions. The node that is
+                          most preferred is the one with the greatest sum of weights, i.e.
+                          for each node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling affinity expressions, etc.),
+                          compute a sum by iterating through the elements of this field and adding
+                          "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+                          node(s) with the highest sum are the most preferred. 
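
As a worked example of the node affinity schema above, a pod template could hard-require a hypothetical node label while merely preferring a zone:

template:
  spec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: node-role.example.com/pooler   # hypothetical node label
                  operator: Exists
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 50
            preference:
              matchExpressions:
                - key: topology.kubernetes.io/zone
                  operator: In
                  values: ["zone-a"]                  # illustrative zone name
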
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
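
The matchLabelKeys semantics above are easier to see in an example: the following term (a sketch, feature gate permitting) restricts affinity to pods of the same ReplicaSet revision by copying the incoming pod's pod-template-hash label into the selector. The topologyKey field, required for every term, is defined just below:

podAffinityTerm:
  topologyKey: kubernetes.io/hostname
  labelSelector:
    matchLabels:
      app: my-app              # illustrative app label
  matchLabelKeys:
    - pod-template-hash        # merged as `key in (value)` from the incoming pod
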
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
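
Putting a preferred pod-affinity term together, this excerpt nudges pods onto nodes already running instances of a given cluster; the cnpg.io/cluster label and the cluster name are assumptions based on CNPG's usual labelling:

affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              cnpg.io/cluster: cluster-example   # assumed label and name
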
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". 
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
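Illustrative aside (not part of the generated CRD): a minimal, hypothetical container fragment showing how the startupProbe fields documented above work together for a slow-starting process. The container name, image, port, and thresholds are all invented for the example.

containers:
  - name: app                               # hypothetical container
    image: registry.example.com/app:1.0     # hypothetical image
    startupProbe:
      httpGet:
        path: /healthz
        port: 8080
      periodSeconds: 10                     # probe every 10 seconds
      failureThreshold: 30                  # tolerate up to 30 failures, i.e. ~300s to start
    livenessProbe:                          # held back until the startup probe succeeds
      httpGet:
        path: /healthz
        port: 8080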
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
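Illustrative aside (hypothetical names throughout): the volumeMounts fields above in use, including subPath to project a single file out of a volume rather than mounting its root.

containers:
  - name: app
    volumeMounts:
      - name: data                  # must match the name of a volume defined in the pod
        mountPath: /var/lib/app
      - name: config
        mountPath: /etc/app/app.conf
        subPath: app.conf           # mount one file from the volume instead of its root
        readOnly: true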
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
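Illustrative aside (hypothetical values): the dnsPolicy and dnsConfig fields above combined. With dnsPolicy "None" the generated resolver configuration comes entirely from dnsConfig, so at least one nameserver must be listed.

dnsPolicy: "None"
dnsConfig:
  nameservers:
    - 192.0.2.10                    # hypothetical resolver (documentation address range)
  searches:
    - svc.cluster.local
  options:
    - name: ndots
      value: "2"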
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
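Illustrative aside: the $(VAR_NAME) expansion and $$ escaping rules described above, with an invented variable name.

env:
  - name: GREETING
    value: hello
args:
  - "$(GREETING)"                   # expanded to: hello
  - "$$(GREETING)"                  # escaped: passed through as the literal $(GREETING)
  - "$(NOT_DEFINED)"                # unresolvable reference: left unchanged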
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
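Illustrative aside (the Secret and container names are hypothetical): three of the valueFrom sources defined above, drawing one variable each from the downward API, a Secret key, and a container resource limit.

env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name    # the pod's own name
  - name: DB_PASSWORD
    valueFrom:
      secretKeyRef:
        name: app-credentials       # hypothetical Secret
        key: password
  - name: MEMORY_LIMIT_MB
    valueFrom:
      resourceFieldRef:
        containerName: app          # hypothetical container
        resource: limits.memory
        divisor: 1Mi                # expose the limit in mebibytes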
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
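Illustrative aside: the postStart and preStop handlers described above, shown on a regular container since the schema notes that lifecycle is not allowed for ephemeral containers. Names, paths, and the port are invented.

containers:
  - name: app
    lifecycle:
      postStart:
        exec:                       # exec'd directly, so a shell has to be invoked explicitly
          command: ["/bin/sh", "-c", "echo started > /tmp/started"]
      preStop:
        httpGet:                    # runs while the termination grace period is already counting down
          path: /shutdown
          port: 8080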
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
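Illustrative aside (the UID is invented): a restrictive container securityContext assembled from the fields documented above.

securityContext:
  runAsNonRoot: true                # kubelet refuses to start the container if the image runs as UID 0
  runAsUser: 10001                  # hypothetical non-root UID
  allowPrivilegeEscalation: false   # sets no_new_privs on the container process
  readOnlyRootFilesystem: true
  capabilities:
    drop: ["ALL"]
  seccompProfile:
    type: RuntimeDefault            # the container runtime's default seccomp profile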
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. 
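Illustrative aside (container names hypothetical): an ephemeral debug container using targetContainerName as described above.

ephemeralContainers:
  - name: debugger
    image: busybox:1.36
    targetContainerName: app        # join the namespaces (PID, IPC, ...) of the "app" container
    stdin: true
    tty: true

In practice such a container is injected through the pod's ephemeralcontainers subresource, for example with kubectl debug -it <pod> --image=busybox --target=app, rather than by editing the pod spec directly.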
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+                  mitigating container breakout vulnerabilities while still allowing users to run their
+                  containers as root without actually having root privileges on the host.
+                  This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+                type: boolean
+              hostname:
+                description: |-
+                  Specifies the hostname of the Pod.
+                  If not specified, the pod's hostname will be set to a system-defined value.
+                type: string
+              imagePullSecrets:
+                description: |-
+                  ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+                  If specified, these secrets will be passed to individual puller implementations for them to use.
+                  More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+                items:
+                  description: |-
+                    LocalObjectReference contains enough information to let you locate the
+                    referenced object inside the same namespace.
+                  properties:
+                    name:
+                      default: ""
+                      description: |-
+                        Name of the referent.
+                        This field is effectively required, but due to backwards compatibility is
+                        allowed to be empty. Instances of this type with an empty value here are
+                        almost certainly wrong.
+                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      type: string
+                  type: object
+                  x-kubernetes-map-type: atomic
+                type: array
+                x-kubernetes-list-map-keys:
+                - name
+                x-kubernetes-list-type: map
+              initContainers:
+                description: |-
+                  List of initialization containers belonging to the pod.
+                  Init containers are executed in order prior to containers being started. If any
+                  init container fails, the pod is considered to have failed and is handled according
+                  to its restartPolicy. The name for an init container or normal container must be
+                  unique among all containers.
+                  Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+                  The resourceRequirements of an init container are taken into account during scheduling
+                  by finding the highest request/limit for each resource type, and then using the max
+                  of that value or the sum of the normal containers. Limits are applied to init containers
+                  in a similar fashion.
+                  Init containers cannot currently be added or removed.
+                  Cannot be updated.
+                  More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+                items:
+                  description: A single application container that you want
+                    to run within a pod.
+                  properties:
+                    args:
+                      description: |-
+                        Arguments to the entrypoint.
+                        The container image's CMD is used if this is not provided.
+                        Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                        cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                        to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                        produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                        of whether the variable exists or not. Cannot be updated.
+                        More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                      items:
+                        type: string
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    command:
+                      description: |-
+                        Entrypoint array. Not executed within a shell.
+                        The container image's ENTRYPOINT is used if this is not provided.
+                        Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable
+                        cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                        to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                        produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                        of whether the variable exists or not. Cannot be updated.
+                        More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                      items:
+                        type: string
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    env:
+                      description: |-
+                        List of environment variables to set in the container.
+                        Cannot be updated.
+                      items:
+                        description: EnvVar represents an environment variable
+                          present in a Container.
+                        properties:
+                          name:
+                            description: Name of the environment variable.
+                              Must be a C_IDENTIFIER.
+                            type: string
+                          value:
+                            description: |-
+                              Variable references $(VAR_NAME) are expanded
+                              using the previously defined environment variables in the container and
+                              any service environment variables. If a variable cannot be resolved,
+                              the reference in the input string will be unchanged. Double $$ are reduced
+                              to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                              "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                              Escaped references will never be expanded, regardless of whether the variable
+                              exists or not.
+                              Defaults to "".
+                            type: string
+                          valueFrom:
+                            description: Source for the environment variable's
+                              value. Cannot be used if value is not empty.
+                            properties:
+                              configMapKeyRef:
+                                description: Selects a key of a ConfigMap.
+                                properties:
+                                  key:
+                                    description: The key to select.
+                                    type: string
+                                  name:
+                                    default: ""
+                                    description: |-
+                                      Name of the referent.
+                                      This field is effectively required, but due to backwards compatibility is
+                                      allowed to be empty. Instances of this type with an empty value here are
+                                      almost certainly wrong.
+                                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                    type: string
+                                  optional:
+                                    description: Specify whether the ConfigMap
+                                      or its key must be defined
+                                    type: boolean
+                                required:
+                                - key
+                                type: object
+                                x-kubernetes-map-type: atomic
+                              fieldRef:
+                                description: |-
+                                  Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                  spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                properties:
+                                  apiVersion:
+                                    description: Version of the schema the
+                                      FieldPath is written in terms of, defaults
+                                      to "v1".
+                                    type: string
+                                  fieldPath:
+                                    description: Path of the field to select
+                                      in the specified API version.
+                                    type: string
+                                required:
+                                - fieldPath
+                                type: object
+                                x-kubernetes-map-type: atomic
+                              resourceFieldRef:
+                                description: |-
+                                  Selects a resource of the container: only resources limits and requests
+                                  (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
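+                                # Editorial sketch (not part of the generated schema): fieldRef and
+                                # resourceFieldRef expose pod metadata and container resources as
+                                # environment variables (the Downward API). The container name "app"
+                                # is hypothetical:
+                                #
+                                #   env:
+                                #     - name: POD_NAME
+                                #       valueFrom:
+                                #         fieldRef:
+                                #           fieldPath: metadata.name
+                                #     - name: CPU_LIMIT
+                                #       valueFrom:
+                                #         resourceFieldRef:
+                                #           containerName: app
+                                #           resource: limits.cpu
+                                #           divisor: "1"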
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
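+                      # Editorial sketch (not part of the generated schema): envFrom imports every
+                      # key of a ConfigMap or Secret as an environment variable; with a prefix, a
+                      # key "port" becomes "APP_port". The object names here are hypothetical:
+                      #
+                      #   envFrom:
+                      #     - prefix: APP_
+                      #       configMapRef:
+                      #         name: app-config
+                      #     - secretRef:
+                      #         name: app-secrets
+                      #         optional: true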
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
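+                                  # Editorial sketch (not part of the generated schema): a postStart
+                                  # hook that runs a command, plus a preStop sleep to delay shutdown.
+                                  # The sleep action assumes the PodLifecycleSleepAction feature gate
+                                  # (beta since Kubernetes 1.30); paths and commands are hypothetical:
+                                  #
+                                  #   lifecycle:
+                                  #     postStart:
+                                  #       exec:
+                                  #         command: ["/bin/sh", "-c", "echo started > /tmp/started"]
+                                  #     preStop:
+                                  #       sleep:
+                                  #         seconds: 5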
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
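+                          # Editorial sketch (not part of the generated schema): a typical HTTP
+                          # liveness probe; with these numbers the kubelet restarts the container
+                          # after roughly failureThreshold * periodSeconds = 3 * 10s = 30s of
+                          # consecutive failures. The endpoint and port are hypothetical:
+                          #
+                          #   livenessProbe:
+                          #     httpGet:
+                          #       path: /healthz
+                          #       port: 8080
+                          #     initialDelaySeconds: 10
+                          #     periodSeconds: 10
+                          #     failureThreshold: 3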
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
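+                              # Editorial sketch (not part of the generated schema): a startup probe
+                              # gives a slow-starting container up to failureThreshold * periodSeconds
+                              # (here 30 * 10s = 300s) before liveness and readiness checks take over.
+                              # The endpoint and port are hypothetical:
+                              #
+                              #   startupProbe:
+                              #     httpGet:
+                              #       path: /healthz
+                              #       port: 8080
+                              #     periodSeconds: 10
+                              #     failureThreshold: 30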
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+                            If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                            value overrides the value provided by the pod spec.
+                            Value must be non-negative integer. The value zero indicates stop immediately via
+                            the kill signal (no opportunity to shut down).
+                            This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                            Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                          format: int64
+                          type: integer
+                        timeoutSeconds:
+                          description: |-
+                            Number of seconds after which the probe times out.
+                            Defaults to 1 second. Minimum value is 1.
+                            More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                          format: int32
+                          type: integer
+                      type: object
+                    stdin:
+                      description: |-
+                        Whether this container should allocate a buffer for stdin in the container runtime. If this
+                        is not set, reads from stdin in the container will always result in EOF.
+                        Default is false.
+                      type: boolean
+                    stdinOnce:
+                      description: |-
+                        Whether the container runtime should close the stdin channel after it has been opened by
+                        a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                        sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                        first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                        at which time stdin is closed and remains closed until the container is restarted. If this
+                        flag is false, a container process that reads from stdin will never receive an EOF.
+                        Default is false.
+                      type: boolean
+                    terminationMessagePath:
+                      description: |-
+                        Optional: Path at which the file to which the container's termination message
+                        will be written is mounted into the container's filesystem.
+                        The message written is intended to be a brief final status, such as an assertion failure message.
+                        Will be truncated by the node if greater than 4096 bytes. The total message length across
+                        all containers will be limited to 12kb.
+                        Defaults to /dev/termination-log.
+                        Cannot be updated.
+                      type: string
+                    terminationMessagePolicy:
+                      description: |-
+                        Indicates how the termination message should be populated. File will use the contents of
+                        terminationMessagePath to populate the container status message on both success and failure.
+                        FallbackToLogsOnError will use the last chunk of container log output if the termination
+                        message file is empty and the container exited with an error.
+                        The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                        Defaults to File.
+                        Cannot be updated.
+                      type: string
+                    tty:
+                      description: |-
+                        Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                        Default is false.
+                      type: boolean
+                    volumeDevices:
+                      description: volumeDevices is the list of block devices
+                        to be used by the container.
+                      items:
+                        description: volumeDevice describes a mapping of a
+                          raw block device within a container.
+                        properties:
+                          devicePath:
+                            description: devicePath is the path inside
+                              the container that the device will be mapped
+                              to.
+                            type: string
+                          name:
+                            description: name must match the name of a persistentVolumeClaim
+                              in the pod
+                            type: string
+                        required:
+                        - devicePath
+                        - name
+                        type: object
+                      type: array
+                      x-kubernetes-list-map-keys:
+                      - devicePath
+                      x-kubernetes-list-type: map
+                    volumeMounts:
+                      description: |-
+                        Pod volumes to mount into the container's filesystem.
+                        Cannot be updated.
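+                      # Editorial sketch (not part of the generated schema): a read-only mount;
+                      # "name" must match a volume declared in the Pod spec, and recursiveReadOnly
+                      # requires readOnly: true with mountPropagation unset or None. The volume
+                      # name and path are hypothetical:
+                      #
+                      #   volumeMounts:
+                      #     - name: data
+                      #       mountPath: /var/lib/data
+                      #       readOnly: true
+                      #       recursiveReadOnly: IfPossible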
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+                  More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                type: object
+                x-kubernetes-map-type: atomic
+              os:
+                description: |-
+                  Specifies the OS of the containers in the pod.
+                  Some pod and container fields are restricted if this is set.
+
+                  If the OS field is set to linux, the following fields must be unset:
+                  - securityContext.windowsOptions
+
+                  If the OS field is set to windows, the following fields must be unset:
+                  - spec.hostPID
+                  - spec.hostIPC
+                  - spec.hostUsers
+                  - spec.securityContext.appArmorProfile
+                  - spec.securityContext.seLinuxOptions
+                  - spec.securityContext.seccompProfile
+                  - spec.securityContext.fsGroup
+                  - spec.securityContext.fsGroupChangePolicy
+                  - spec.securityContext.sysctls
+                  - spec.shareProcessNamespace
+                  - spec.securityContext.runAsUser
+                  - spec.securityContext.runAsGroup
+                  - spec.securityContext.supplementalGroups
+                  - spec.securityContext.supplementalGroupsPolicy
+                  - spec.containers[*].securityContext.appArmorProfile
+                  - spec.containers[*].securityContext.seLinuxOptions
+                  - spec.containers[*].securityContext.seccompProfile
+                  - spec.containers[*].securityContext.capabilities
+                  - spec.containers[*].securityContext.readOnlyRootFilesystem
+                  - spec.containers[*].securityContext.privileged
+                  - spec.containers[*].securityContext.allowPrivilegeEscalation
+                  - spec.containers[*].securityContext.procMount
+                  - spec.containers[*].securityContext.runAsUser
+                  - spec.containers[*].securityContext.runAsGroup
+                properties:
+                  name:
+                    description: |-
+                      Name is the name of the operating system. The currently supported values are linux and windows.
+                      Additional values may be defined in the future and can be one of:
+                      https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+                      Clients should expect to handle additional values and treat unrecognized values in this field as os: null.
+                    type: string
+                required:
+                - name
+                type: object
+              overhead:
+                additionalProperties:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
+                description: |-
+                  Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+                  This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+                  the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+                  The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+                  set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+                  defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+                  More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+                type: object
+              preemptionPolicy:
+                description: |-
+                  PreemptionPolicy is the Policy for preempting pods with lower priority.
+                  One of Never, PreemptLowerPriority.
+                  Defaults to PreemptLowerPriority if unset.
+                type: string
+              priority:
+                description: |-
+                  The priority value. Various system components use this field to find the
+                  priority of the pod. When Priority Admission Controller is enabled, it
+                  prevents users from setting this field. The admission controller populates
+                  this field from PriorityClassName.
+                  The higher the value, the higher the priority.
+                format: int32
+                type: integer
+              priorityClassName:
+                description: |-
+                  If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Defaults to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by the specified scheduler. + If not specified, the pod will be dispatched by the default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate.
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied.
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Defaults to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Defaults to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be a non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys is equal to or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature, enabled by default by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature, enabled by default by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put a balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field.
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is a reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker. + It should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision.
+ type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. 
Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: Whether the first backup has to start immediately
+ after creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`).
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots.
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: Whether this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup.
This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: Information about the last time a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: The next time a backup will be run
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.2
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: 
ghcr.io/cloudnative-pg/cloudnative-pg:1.25.1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.1 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + 
- v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.26.0-rc1.yaml b/releases/cnpg-1.26.0-rc1.yaml new file mode 100644 index 0000000000..9c091246ed --- /dev/null +++ b/releases/cnpg-1.26.0-rc1.yaml @@ -0,0 +1,18012 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. 
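+            # Editor's note (illustrative, not part of the schema): a minimal
+            # Backup request using the `cluster` and `method` fields documented
+            # above; names are hypothetical:
+            #
+            #   apiVersion: postgresql.cnpg.io/v1
+            #   kind: Backup
+            #   metadata:
+            #     name: backup-example
+            #   spec:
+            #     cluster:
+            #       name: cluster-example
+            #     method: barmanObjectStore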
+ enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required by the S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment; + defaults to false.
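+            # Editor's note (illustrative): the recurring `{key, name}` objects
+            # in this status stanza are secret key selectors; each one points at
+            # a single key inside a Kubernetes Secret, e.g. (hypothetical names):
+            #
+            #   applicationCredentials:
+            #     name: gcs-creds
+            #     key: gcs-credentials.json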
+ type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace.
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
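+              # Editor's note (illustrative, not part of the schema): a sketch
+              # of a ClusterImageCatalog using this schema, with one image per
+              # unique major version; image tags are hypothetical:
+              #
+              #   apiVersion: postgresql.cnpg.io/v1
+              #   kind: ClusterImageCatalog
+              #   metadata:
+              #     name: postgresql
+              #   spec:
+              #     images:
+              #       - major: 16
+              #         image: ghcr.io/cloudnative-pg/postgresql:16.6   # hypothetical tag
+              #       - major: 17
+              #         image: ghcr.io/cloudnative-pg/postgresql:17.2   # hypothetical tag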
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
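+              # Editor's note (illustrative, not part of the schema): an
+              # abridged sketch of how these affinity knobs are set on a
+              # Cluster; names and the zone label are hypothetical:
+              #
+              #   apiVersion: postgresql.cnpg.io/v1
+              #   kind: Cluster
+              #   metadata:
+              #     name: cluster-example
+              #   spec:
+              #     instances: 3
+              #     affinity:
+              #       enablePodAntiAffinity: true   # operator-generated anti-affinity
+              #       additionalPodAffinity:
+              #         preferredDuringSchedulingIgnoredDuringExecution:
+              #           - weight: 50              # must be in the range 1-100
+              #             podAffinityTerm:
+              #               topologyKey: topology.kubernetes.io/zone
+              #               labelSelector:
+              #                 matchLabels:
+              #                   app: my-app       # hypothetical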
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
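+          # Illustrative sketch (not part of the generated schema): a preferred
+          # node-affinity term on a CloudNativePG Cluster. The "workload" node
+          # label is hypothetical; the scheduler sums the weight of every term a
+          # node matches and favours the node with the highest total.
+          #
+          #   spec:
+          #     affinity:
+          #       nodeAffinity:
+          #         preferredDuringSchedulingIgnoredDuringExecution:
+          #           - weight: 80
+          #             preference:
+          #               matchExpressions:
+          #                 - key: workload
+          #                   operator: In
+          #                   values: ["postgres"]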
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+              More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+            type: object
+          podAntiAffinityType:
+            description: |-
+              PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+              considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+              "required". Setting it to "required" could lead to instances remaining pending until new kubernetes nodes are
+              added if all the existing nodes don't match the required pod anti-affinity rule.
+              More info:
+              https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+            type: string
+          tolerations:
+            description: |-
+              Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+              on tainted nodes.
+              More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+            items:
+              description: |-
+                The pod this Toleration is attached to tolerates any taint that matches
+                the triple <key,value,effect> using the matching operator <operator>.
+              properties:
+                effect:
+                  description: |-
+                    Effect indicates the taint effect to match. Empty means match all taint effects.
+                    When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                  type: string
+                key:
+                  description: |-
+                    Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                    If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                  type: string
+                operator:
+                  description: |-
+                    Operator represents a key's relationship to the value.
+                    Valid operators are Exists and Equal. Defaults to Equal.
+                    Exists is equivalent to wildcard for value, so that a pod can
+                    tolerate all taints of a particular category.
+                  type: string
+                tolerationSeconds:
+                  description: |-
+                    TolerationSeconds represents the period of time the toleration (which must be
+                    of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                    it is not set, which means tolerate the taint forever (do not evict). Zero and
+                    negative values will be treated as 0 (evict immediately) by the system.
+                  format: int64
+                  type: integer
+                value:
+                  description: |-
+                    Value is the taint value the toleration matches to.
+                    If the operator is Exists, the value should be empty, otherwise just a regular string.
+                  type: string
+              type: object
+            type: array
+          topologyKey:
+            description: |-
+              TopologyKey to use for anti-affinity configuration. See k8s documentation
+              for more info on that
+            type: string
+        type: object
+      backup:
+        description: The configuration to be used for backups
+        properties:
+          barmanObjectStore:
+            description: The configuration for the barman-cloud tool suite
+            properties:
+              azureCredentials:
+                description: The credentials to use to upload data to Azure
+                  Blob Storage
+                properties:
+                  connectionString:
+                    description: The connection string to be used
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                  inheritFromAzureAD:
+                    description: Use the Azure AD based authentication without
+                      providing explicitly the keys.
+                    type: boolean
+                  storageAccount:
+                    description: The storage account where to upload data
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
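+          # Illustrative sketch (not part of the generated schema): hard pod
+          # anti-affinity plus a toleration for a dedicated node pool; the
+          # "dedicated: postgres" taint is hypothetical. With "required",
+          # instances stay Pending when no node satisfies the rule.
+          #
+          #   spec:
+          #     affinity:
+          #       enablePodAntiAffinity: true
+          #       podAntiAffinityType: required
+          #       topologyKey: kubernetes.io/hostname
+          #       tolerations:
+          #         - key: dedicated
+          #           operator: Equal
+          #           value: postgres
+          #           effect: NoSchedule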
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                  storageKey:
+                    description: |-
+                      The storage account key to be used in conjunction
+                      with the storage account name
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                  storageSasToken:
+                    description: |-
+                      A shared-access-signature to be used in conjunction with
+                      the storage account name
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                type: object
+              data:
+                description: |-
+                  The configuration to be used to backup the data files
+                  When not defined, base backup files will be stored uncompressed and may
+                  be unencrypted in the object store, according to the bucket default
+                  policy.
+                properties:
+                  additionalCommandArgs:
+                    description: |-
+                      AdditionalCommandArgs represents additional arguments that can be appended
+                      to the 'barman-cloud-backup' command-line invocation. These arguments
+                      provide flexibility to customize the backup process further according to
+                      specific requirements or configurations.
+
+                      Example:
+                      In a scenario where specialized backup options are required, such as setting
+                      a specific timeout or defining custom behavior, users can use this field
+                      to specify additional command arguments.
+
+                      Note:
+                      It's essential to ensure that the provided arguments are valid and supported
+                      by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+                      behavior during execution.
+                    items:
+                      type: string
+                    type: array
+                  compression:
+                    description: |-
+                      Compress a backup file (a tar file per tablespace) while streaming it
+                      to the object store. Available options are empty string (no
+                      compression, default), `gzip`, `bzip2`, and `snappy`.
+                    enum:
+                    - bzip2
+                    - gzip
+                    - snappy
+                    type: string
+                  encryption:
+                    description: |-
+                      Whether to force the encryption of files (if the bucket is
+                      not already configured for that).
+                      Allowed options are empty string (use the bucket policy, default),
+                      `AES256` and `aws:kms`
+                    enum:
+                    - AES256
+                    - aws:kms
+                    type: string
+                  immediateCheckpoint:
+                    description: |-
+                      Control whether the I/O workload for the backup initial checkpoint will
+                      be limited, according to the `checkpoint_completion_target` setting on
+                      the PostgreSQL server. If set to true, an immediate checkpoint will be
+                      used, meaning PostgreSQL will complete the checkpoint as soon as
+                      possible. `false` by default.
+                    type: boolean
+                  jobs:
+                    description: |-
+                      The number of parallel jobs to be used to upload the backup, defaults
+                      to 2
+                    format: int32
+                    minimum: 1
+                    type: integer
+                type: object
+              destinationPath:
+                description: |-
+                  The path where to store the backup (i.e. s3://bucket/path/to/folder)
+                  this path, with different destination folders, will be used for WALs
+                  and for data
+                minLength: 1
+                type: string
+              endpointCA:
+                description: |-
+                  EndpointCA store the CA bundle of the barman endpoint.
+                  Useful when using self-signed certificates to avoid
+                  errors with certificate issuer and barman-cloud-wal-archive
+                properties:
+                  key:
+                    description: The key to select
+                    type: string
+                  name:
+                    description: Name of the referent.
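+          # Illustrative sketch (not part of the generated schema): a base-backup
+          # configuration using the fields above. The bucket path is hypothetical;
+          # `jobs: 4` uploads four tar files in parallel and `AES256` forces
+          # server-side encryption regardless of the bucket policy.
+          #
+          #   spec:
+          #     backup:
+          #       barmanObjectStore:
+          #         destinationPath: s3://my-bucket/pg
+          #         data:
+          #           compression: gzip
+          #           encryption: AES256
+          #           jobs: 4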
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
                      Available
+                      options are empty string (no compression, default), `gzip`, `bzip2`,
+                      `lz4`, `snappy`, `xz`, and `zstd`.
+                    enum:
+                    - bzip2
+                    - gzip
+                    - lz4
+                    - snappy
+                    - xz
+                    - zstd
+                    type: string
+                  encryption:
+                    description: |-
+                      Whether to force the encryption of files (if the bucket is
+                      not already configured for that).
+                      Allowed options are empty string (use the bucket policy, default),
+                      `AES256` and `aws:kms`
+                    enum:
+                    - AES256
+                    - aws:kms
+                    type: string
+                  maxParallel:
+                    description: |-
+                      Number of WAL files to be either archived in parallel (when the
+                      PostgreSQL instance is archiving to a backup object store) or
+                      restored in parallel (when a PostgreSQL standby is fetching WAL
+                      files from a recovery object store). If not specified, WAL files
+                      will be processed one at a time. It accepts a positive integer as a
+                      value - with 1 being the minimum accepted value.
+                    minimum: 1
+                    type: integer
+                  restoreAdditionalCommandArgs:
+                    description: |-
+                      Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                      command-line invocation. These arguments provide flexibility to customize
+                      the WAL restore process further, according to specific requirements or configurations.
+
+                      Example:
+                      In a scenario where specialized backup options are required, such as setting
+                      a specific timeout or defining custom behavior, users can use this field
+                      to specify additional command arguments.
+
+                      Note:
+                      It's essential to ensure that the provided arguments are valid and supported
+                      by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                      behavior during execution.
+                    items:
+                      type: string
+                    type: array
+                type: object
+            required:
+            - destinationPath
+            type: object
+          retentionPolicy:
+            description: |-
+              RetentionPolicy is the retention policy to be used for backups
+              and WALs (i.e. '60d'). The retention policy is expressed in the form
+              of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+              days, weeks, months.
+              It's currently only applicable when using the BarmanObjectStore method.
+            pattern: ^[1-9][0-9]*[dwm]$
+            type: string
+          target:
+            default: prefer-standby
+            description: |-
+              The policy to decide which instance should perform backups. Available
+              options are empty string, which will default to `prefer-standby` policy,
+              `primary` to have backups run always on primary instances, `prefer-standby`
+              to have backups run preferably on the most updated standby, if available.
+            enum:
+            - primary
+            - prefer-standby
+            type: string
+          volumeSnapshot:
+            description: VolumeSnapshot provides the configuration for the
+              execution of volume snapshot backups.
+            properties:
+              annotations:
+                additionalProperties:
+                  type: string
+                description: Annotations key-value pairs that will be added
+                  to .metadata.annotations snapshot resources.
+                type: object
+              className:
+                description: |-
+                  ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+                  It is the default class for the other types if no specific class is present
+                type: string
+              labels:
+                additionalProperties:
+                  type: string
+                description: Labels are key-value pairs that will be added
+                  to .metadata.labels snapshot resources.
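+          # Illustrative sketch (not part of the generated schema): WAL archiving
+          # and retention. "30d" satisfies the `^[1-9][0-9]*[dwm]$` pattern, and
+          # `maxParallel: 8` processes up to eight WAL files at a time; the bucket
+          # path is hypothetical.
+          #
+          #   spec:
+          #     backup:
+          #       retentionPolicy: "30d"
+          #       target: prefer-standby
+          #       barmanObjectStore:
+          #         destinationPath: s3://my-bucket/pg
+          #         wal:
+          #           compression: zstd
+          #           maxParallel: 8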
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
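+          # Illustrative sketch (not part of the generated schema): an online
+          # (hot) volume-snapshot backup; the VolumeSnapshotClass name is
+          # hypothetical. `waitForArchive: true` makes pg_backup_stop wait until
+          # the required WAL has been archived.
+          #
+          #   spec:
+          #     backup:
+          #       volumeSnapshot:
+          #         className: csi-snapclass
+          #         online: true
+          #         onlineConfiguration:
+          #           immediateCheckpoint: true
+          #           waitForArchive: true
+          #         snapshotOwnerReference: backup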
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. 
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
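+          # Illustrative sketch (not part of the generated schema): initdb
+          # bootstrap with post-init SQL. The Secret name and key are
+          # hypothetical; per the ordering described above, all secretRefs are
+          # processed before any configMapRefs.
+          #
+          #   spec:
+          #     bootstrap:
+          #       initdb:
+          #         database: app
+          #         owner: app
+          #         postInitSQL:
+          #           - CREATE EXTENSION IF NOT EXISTS pg_stat_statements
+          #         postInitApplicationSQLRefs:
+          #           secretRefs:
+          #             - name: app-seed
+          #               key: seed.sql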
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
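+          # Illustrative sketch (not part of the generated schema): user-provided
+          # certificates. Secret names are hypothetical; each CA secret must hold
+          # `ca.crt`, and `ca.key` only when the corresponding TLS secret is not
+          # supplied.
+          #
+          #   spec:
+          #     certificates:
+          #       serverCASecret: pg-server-ca
+          #       serverTLSSecret: pg-server-tls
+          #       clientCASecret: pg-client-ca
+          #       replicationTLSSecret: pg-repl-tls
+          #       serverAltDNSNames:
+          #         - pg.example.com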
+            type: string
+          serverTLSSecret:
+            description: |-
+              The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+              `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+              If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+              created using the provided CA.
+            type: string
+        type: object
+      description:
+        description: Description of this PostgreSQL cluster
+        type: string
+      enablePDB:
+        default: true
+        description: |-
+          Manage the `PodDisruptionBudget` resources within the cluster. When
+          configured as `true` (default setting), the pod disruption budgets
+          will safeguard the primary node from being terminated. Conversely,
+          setting it to `false` will result in the absence of any
+          `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+          hosting the PostgreSQL cluster. This latter configuration is
+          advisable for any PostgreSQL cluster employed for
+          development/staging purposes.
+        type: boolean
+      enableSuperuserAccess:
+        default: false
+        description: |-
+          When this option is enabled, the operator will use the `SuperuserSecret`
+          to update the `postgres` user password (if the secret is
+          not present, the operator will automatically create one). When this
+          option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+          it when automatically created, and then blank the password of the `postgres`
+          user by setting it to `NULL`. Disabled by default.
+        type: boolean
+      env:
+        description: |-
+          Env follows the Env format to pass environment variables
+          to the pods created in the cluster
+        items:
+          description: EnvVar represents an environment variable present in
+            a Container.
+          properties:
+            name:
+              description: Name of the environment variable. Must be a C_IDENTIFIER.
+              type: string
+            value:
+              description: |-
+                Variable references $(VAR_NAME) are expanded
+                using the previously defined environment variables in the container and
+                any service environment variables. If a variable cannot be resolved,
+                the reference in the input string will be unchanged. Double $$ are reduced
+                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                Escaped references will never be expanded, regardless of whether the variable
+                exists or not.
+                Defaults to "".
+              type: string
+            valueFrom:
+              description: Source for the environment variable's value. Cannot
+                be used if value is not empty.
+              properties:
+                configMapKeyRef:
+                  description: Selects a key of a ConfigMap.
+                  properties:
+                    key:
+                      description: The key to select.
+                      type: string
+                    name:
+                      default: ""
+                      description: |-
+                        Name of the referent.
+                        This field is effectively required, but due to backwards compatibility is
+                        allowed to be empty. Instances of this type with an empty value here are
+                        almost certainly wrong.
+                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      type: string
+                    optional:
+                      description: Specify whether the ConfigMap or its key
+                        must be defined
+                      type: boolean
+                  required:
+                  - key
+                  type: object
+                  x-kubernetes-map-type: atomic
+                fieldRef:
+                  description: |-
+                    Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                    spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                  properties:
+                    apiVersion:
+                      description: Version of the schema the FieldPath is
+                        written in terms of, defaults to "v1".
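+          # Illustrative sketch (not part of the generated schema): environment
+          # variables for the instance pods; the Secret reference is hypothetical.
+          #
+          #   spec:
+          #     env:
+          #       - name: TZ
+          #         value: Europe/Rome
+          #       - name: HTTPS_PROXY
+          #         valueFrom:
+          #           secretKeyRef:
+          #             name: proxy-config
+          #             key: https_proxy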
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+              The pod in which this EphemeralVolumeSource is embedded will be the
+              owner of the PVC, i.e. the PVC will be deleted together with the
+              pod. The name of the PVC will be `<pod name>-<volume name>` where
+              `<volume name>` is the name from the `PodSpec.Volumes` array
+              entry. Pod validation will reject the pod if the concatenated name
+              is not valid for a PVC (for example, too long).
+
+              An existing PVC with that name that is not owned by the pod
+              will *not* be used for the pod to avoid using an unrelated
+              volume by mistake. Starting the pod is then blocked until
+              the unrelated PVC is removed. If such a pre-created PVC is
+              meant to be used by the pod, the PVC has to be updated with an
+              owner reference to the pod once the pod exists. Normally
+              this should not be necessary, but it may be useful when
+              manually reconstructing a broken cluster.
+
+              This field is read-only and no changes will be made by Kubernetes
+              to the PVC after it has been created.
+
+              Required, must not be nil.
+            properties:
+              metadata:
+                description: |-
+                  May contain labels and annotations that will be copied into the PVC
+                  when creating it. No other fields are allowed and will be rejected during
+                  validation.
+                type: object
+              spec:
+                description: |-
+                  The specification for the PersistentVolumeClaim. The entire content is
+                  copied unchanged into the PVC that gets created from this
+                  template. The same fields as in a PersistentVolumeClaim
+                  are also valid here.
+                properties:
+                  accessModes:
+                    description: |-
+                      accessModes contains the desired access modes the volume should have.
+                      More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  dataSource:
+                    description: |-
+                      dataSource field can be used to specify either:
+                      * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                      * An existing PVC (PersistentVolumeClaim)
+                      If the provisioner or an external controller can support the specified data source,
+                      it will create a new volume based on the contents of the specified data source.
+                      When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+                      and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+                      If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+                    properties:
+                      apiGroup:
+                        description: |-
+                          APIGroup is the group for the resource being referenced.
+                          If APIGroup is not specified, the specified Kind must be in the core API group.
+                          For any other third-party types, APIGroup is required.
+                        type: string
+                      kind:
+                        description: Kind is the type of resource being referenced
+                        type: string
+                      name:
+                        description: Name is the name of resource being referenced
+                        type: string
+                    required:
+                    - kind
+                    - name
+                    type: object
+                    x-kubernetes-map-type: atomic
+                  dataSourceRef:
+                    description: |-
+                      dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+                      volume is desired. This may be any object from a non-empty API group (non
+                      core object) or a PersistentVolumeClaim object.
+                      When this field is specified, volume binding will only succeed if the type of
+                      the specified object matches some installed volume populator or dynamic
+                      provisioner.
+                      This field will replace the functionality of the dataSource field and as such
+                      if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
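+                        # A minimal sketch of how this claim template might be used
+                        # in a Cluster manifest; the enclosing `ephemeralVolumeSource`
+                        # field name is inferred from this schema, and the storage
+                        # class and size are illustrative only:
+                        #
+                        #   spec:
+                        #     ephemeralVolumeSource:
+                        #       volumeClaimTemplate:
+                        #         spec:
+                        #           accessModes: ["ReadWriteOnce"]
+                        #           storageClassName: standard
+                        #           resources:
+                        #             requests:
+                        #               storage: 1Gi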
+                          type: string
+                      type: object
+                    required:
+                      - spec
+                    type: object
+                type: object
+              ephemeralVolumesSizeLimit:
+                description: |-
+                  EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+                  volumes
+                properties:
+                  shm:
+                    anyOf:
+                      - type: integer
+                      - type: string
+                    description: Shm is the size limit of the shared memory volume
+                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                    x-kubernetes-int-or-string: true
+                  temporaryData:
+                    anyOf:
+                      - type: integer
+                      - type: string
+                    description: TemporaryData is the size limit of the temporary
+                      data volume
+                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                    x-kubernetes-int-or-string: true
+                type: object
+              externalClusters:
+                description: The list of external clusters which are used in the configuration
+                items:
+                  description: |-
+                    ExternalCluster represents the connection parameters to an
+                    external cluster which is used in the other sections of the configuration
+                  properties:
+                    barmanObjectStore:
+                      description: The configuration for the barman-cloud tool suite
+                      properties:
+                        azureCredentials:
+                          description: The credentials to use to upload data to Azure
+                            Blob Storage
+                          properties:
+                            connectionString:
+                              description: The connection string to be used
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                            inheritFromAzureAD:
+                              description: Use the Azure AD based authentication without
+                                explicitly providing the keys.
+                              type: boolean
+                            storageAccount:
+                              description: The storage account to which data will be uploaded
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                            storageKey:
+                              description: |-
+                                The storage account key to be used in conjunction
+                                with the storage account name
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                            storageSasToken:
+                              description: |-
+                                A shared-access-signature to be used in conjunction with
+                                the storage account name
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                          type: object
+                        data:
+                          description: |-
+                            The configuration to be used to back up the data files.
+                            When not defined, base backup files will be stored uncompressed and may
+                            be unencrypted in the object store, according to the bucket default
+                            policy.
+                          properties:
+                            additionalCommandArgs:
+                              description: |-
+                                AdditionalCommandArgs represents additional arguments that can be appended
+                                to the 'barman-cloud-backup' command-line invocation. These arguments
+                                provide flexibility to customize the backup process further according to
+                                specific requirements or configurations.
+
+                                Example:
+                                In a scenario where specialized backup options are required, such as setting
+                                a specific timeout or defining custom behavior, users can use this field
+                                to specify additional command arguments.
+
+                                Note:
+                                It's essential to ensure that the provided arguments are valid and supported
+                                by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+                                behavior during execution.
+                              items:
+                                type: string
+                              type: array
+                            compression:
+                              description: |-
+                                Compress a backup file (a tar file per tablespace) while streaming it
+                                to the object store. Available options are empty string (no
+                                compression, default), `gzip`, `bzip2`, and `snappy`.
+                              enum:
+                                - bzip2
+                                - gzip
+                                - snappy
+                              type: string
+                            encryption:
+                              description: |-
+                                Whether to force the encryption of files (if the bucket is
+                                not already configured for that).
+                                Allowed options are empty string (use the bucket policy, default),
+                                `AES256` and `aws:kms`
+                              enum:
+                                - AES256
+                                - aws:kms
+                              type: string
+                            immediateCheckpoint:
+                              description: |-
+                                Control whether the I/O workload for the backup initial checkpoint will
+                                be limited, according to the `checkpoint_completion_target` setting on
+                                the PostgreSQL server. If set to true, an immediate checkpoint will be
+                                used, meaning PostgreSQL will complete the checkpoint as soon as
+                                possible. `false` by default.
+                              type: boolean
+                            jobs:
+                              description: |-
+                                The number of parallel jobs to be used to upload the backup, defaults
+                                to 2
+                              format: int32
+                              minimum: 1
+                              type: integer
+                          type: object
+                        destinationPath:
+                          description: |-
+                            The path where to store the backup (e.g. s3://bucket/path/to/folder);
+                            this path, with different destination folders, will be used for WALs
+                            and for data
+                          minLength: 1
+                          type: string
+                        endpointCA:
+                          description: |-
+                            EndpointCA stores the CA bundle of the barman endpoint.
+                            Useful when using self-signed certificates to avoid
+                            errors with certificate issuer and barman-cloud-wal-archive
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                        endpointURL:
+                          description: |-
+                            Endpoint to be used to upload data to the cloud,
+                            overriding the automatic endpoint discovery
+                          type: string
+                        googleCredentials:
+                          description: The credentials to use to upload data to Google
+                            Cloud Storage
+                          properties:
+                            applicationCredentials:
+                              description: The secret containing the Google Cloud
+                                Storage JSON file with the credentials
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                            gkeEnvironment:
+                              description: |-
+                                If set to true, it will be presumed that the workload is running
+                                inside a GKE environment; defaults to false.
+                              type: boolean
+                          type: object
+                        historyTags:
+                          additionalProperties:
+                            type: string
+                          description: |-
+                            HistoryTags is a list of key-value pairs that will be passed to the
+                            Barman --history-tags option.
+                          type: object
+                        s3Credentials:
+                          description: The credentials to use to upload data to S3
+                          properties:
+                            accessKeyId:
+                              description: The reference to the access key id
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                            inheritFromIAMRole:
+                              description: Use role-based authentication without
+                                explicitly providing the keys.
+                              type: boolean
+                            region:
+                              description: The reference to the secret containing
+                                the region name
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                            secretAccessKey:
+                              description: The reference to the secret access key
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
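+                                  # A hedged sketch of an external cluster backed by an
+                                  # S3-compatible object store; the secret and bucket
+                                  # names are illustrative, not defaults:
+                                  #
+                                  #   externalClusters:
+                                  #     - name: origin
+                                  #       barmanObjectStore:
+                                  #         destinationPath: s3://my-bucket/backups
+                                  #         s3Credentials:
+                                  #           accessKeyId:
+                                  #             name: origin-creds
+                                  #             key: ACCESS_KEY_ID
+                                  #           secretAccessKey:
+                                  #             name: origin-creds
+                                  #             key: SECRET_ACCESS_KEY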
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                            sessionToken:
+                              description: The reference to the session key
+                              properties:
+                                key:
+                                  description: The key to select
+                                  type: string
+                                name:
+                                  description: Name of the referent.
+                                  type: string
+                              required:
+                                - key
+                                - name
+                              type: object
+                          type: object
+                        serverName:
+                          description: |-
+                            The server name on S3, the cluster name is used if this
+                            parameter is omitted
+                          type: string
+                        tags:
+                          additionalProperties:
+                            type: string
+                          description: |-
+                            Tags is a list of key-value pairs that will be passed to the
+                            Barman --tags option.
+                          type: object
+                        wal:
+                          description: |-
+                            The configuration for the backup of the WAL stream.
+                            When not defined, WAL files will be stored uncompressed and may be
+                            unencrypted in the object store, according to the bucket default policy.
+                          properties:
+                            archiveAdditionalCommandArgs:
+                              description: |-
+                                Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+                                command-line invocation. These arguments provide flexibility to customize
+                                the WAL archive process further, according to specific requirements or configurations.
+
+                                Example:
+                                In a scenario where specialized backup options are required, such as setting
+                                a specific timeout or defining custom behavior, users can use this field
+                                to specify additional command arguments.
+
+                                Note:
+                                It's essential to ensure that the provided arguments are valid and supported
+                                by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+                                behavior during execution.
+                              items:
+                                type: string
+                              type: array
+                            compression:
+                              description: |-
+                                Compress a WAL file before sending it to the object store. Available
+                                options are empty string (no compression, default), `gzip`, `bzip2`,
+                                `lz4`, `snappy`, `xz`, and `zstd`.
+                              enum:
+                                - bzip2
+                                - gzip
+                                - lz4
+                                - snappy
+                                - xz
+                                - zstd
+                              type: string
+                            encryption:
+                              description: |-
+                                Whether to force the encryption of files (if the bucket is
+                                not already configured for that).
+                                Allowed options are empty string (use the bucket policy, default),
+                                `AES256` and `aws:kms`
+                              enum:
+                                - AES256
+                                - aws:kms
+                              type: string
+                            maxParallel:
+                              description: |-
+                                Number of WAL files to be either archived in parallel (when the
+                                PostgreSQL instance is archiving to a backup object store) or
+                                restored in parallel (when a PostgreSQL standby is fetching WAL
+                                files from a recovery object store). If not specified, WAL files
+                                will be processed one at a time. It accepts a positive integer as a
+                                value - with 1 being the minimum accepted value.
+                              minimum: 1
+                              type: integer
+                            restoreAdditionalCommandArgs:
+                              description: |-
+                                Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                                command-line invocation. These arguments provide flexibility to customize
+                                the WAL restore process further, according to specific requirements or configurations.
+
+                                Example:
+                                In a scenario where specialized backup options are required, such as setting
+                                a specific timeout or defining custom behavior, users can use this field
+                                to specify additional command arguments.
+
+                                Note:
+                                It's essential to ensure that the provided arguments are valid and supported
+                                by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                                behavior during execution.
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      type: string
+                    optional:
+                      description: Specify whether the Secret or its key must
+                        be defined
+                      type: boolean
+                  required:
+                    - key
+                  type: object
+                  x-kubernetes-map-type: atomic
+                sslRootCert:
+                  description: |-
+                    The reference to an SSL CA public key to be used to connect to this
+                    instance
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must
+                        be a valid secret key.
+                      type: string
+                    name:
+                      default: ""
+                      description: |-
+                        Name of the referent.
+                        This field is effectively required, but due to backwards compatibility is
+                        allowed to be empty. Instances of this type with an empty value here are
+                        almost certainly wrong.
+                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      type: string
+                    optional:
+                      description: Specify whether the Secret or its key must
+                        be defined
+                      type: boolean
+                  required:
+                    - key
+                  type: object
+                  x-kubernetes-map-type: atomic
+                required:
+                  - name
+                type: object
+              type: array
+            failoverDelay:
+              default: 0
+              description: |-
+                The amount of time (in seconds) to wait before triggering a failover
+                after the primary PostgreSQL instance in the cluster was detected
+                to be unhealthy
+              format: int32
+              type: integer
+            imageCatalogRef:
+              description: Defines the major PostgreSQL version we want to use within
+                an ImageCatalog
+              properties:
+                apiGroup:
+                  description: |-
+                    APIGroup is the group for the resource being referenced.
+                    If APIGroup is not specified, the specified Kind must be in the core API group.
+                    For any other third-party types, APIGroup is required.
+                  type: string
+                kind:
+                  description: Kind is the type of resource being referenced
+                  type: string
+                major:
+                  description: The major version of PostgreSQL we want to use from
+                    the ImageCatalog
+                  type: integer
+                name:
+                  description: Name is the name of resource being referenced
+                  type: string
+              required:
+                - kind
+                - major
+                - name
+              type: object
+              x-kubernetes-map-type: atomic
+              x-kubernetes-validations:
+                - message: Only image catalogs are supported
+                  rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+                - message: Only image catalogs are supported
+                  rule: self.apiGroup == 'postgresql.cnpg.io'
+            imageName:
+              description: |-
+                Name of the container image, supporting both tags (`<image>:<tag>`)
+                and digests for deterministic and repeatable deployments
+                (`<image>:<tag>@sha256:<digestValue>`)
+              type: string
+            imagePullPolicy:
+              description: |-
+                Image pull policy.
+                One of `Always`, `Never` or `IfNotPresent`.
+                If not defined, it defaults to `IfNotPresent`.
+                Cannot be updated.
+                More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+              type: string
+            imagePullSecrets:
+              description: The list of pull secrets to be used to pull the images
+              items:
+                description: |-
+                  LocalObjectReference contains enough information to let you locate a
+                  local object with a known type inside the same namespace
+                properties:
+                  name:
+                    description: Name of the referent.
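+                    # A hedged sketch of selecting the PostgreSQL image through a
+                    # catalog, matching the `imageCatalogRef` schema above; the
+                    # catalog name and major version are illustrative:
+                    #
+                    #   imageCatalogRef:
+                    #     apiGroup: postgresql.cnpg.io
+                    #     kind: ImageCatalog
+                    #     name: postgresql
+                    #     major: 16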
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
It
+                                  may be set to "false" if the cluster load-balancer does not rely on
+                                  NodePorts. If the caller requests specific NodePorts (by specifying a
+                                  value), those requests will be respected, regardless of this field.
+                                  This field may only be set for services with type LoadBalancer and will
+                                  be cleared if the type is changed to any other type.
+                                type: boolean
+                              clusterIP:
+                                description: |-
+                                  clusterIP is the IP address of the service and is usually assigned
+                                  randomly. If an address is specified manually, is in-range (as per
+                                  system configuration), and is not in use, it will be allocated to the
+                                  service; otherwise creation of the service will fail. This field may not
+                                  be changed through updates unless the type field is also being changed
+                                  to ExternalName (which requires this field to be blank) or the type
+                                  field is being changed from ExternalName (in which case this field may
+                                  optionally be specified, as described above). Valid values are "None",
+                                  empty string (""), or a valid IP address. Setting this to "None" makes a
+                                  "headless service" (no virtual IP), which is useful when direct endpoint
+                                  connections are preferred and proxying is not required. Only applies to
+                                  types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                                  when creating a Service of type ExternalName, creation will fail. This
+                                  field will be wiped when updating a Service to type ExternalName.
+                                  More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                type: string
+                              clusterIPs:
+                                description: |-
+                                  ClusterIPs is a list of IP addresses assigned to this service, and are
+                                  usually assigned randomly. If an address is specified manually, is
+                                  in-range (as per system configuration), and is not in use, it will be
+                                  allocated to the service; otherwise creation of the service will fail.
+                                  This field may not be changed through updates unless the type field is
+                                  also being changed to ExternalName (which requires this field to be
+                                  empty) or the type field is being changed from ExternalName (in which
+                                  case this field may optionally be specified, as described above). Valid
+                                  values are "None", empty string (""), or a valid IP address. Setting
+                                  this to "None" makes a "headless service" (no virtual IP), which is
+                                  useful when direct endpoint connections are preferred and proxying is
+                                  not required. Only applies to types ClusterIP, NodePort, and
+                                  LoadBalancer. If this field is specified when creating a Service of type
+                                  ExternalName, creation will fail. This field will be wiped when updating
+                                  a Service to type ExternalName. If this field is not specified, it will
+                                  be initialized from the clusterIP field. If this field is specified,
+                                  clients must ensure that clusterIPs[0] and clusterIP have the same
+                                  value.
+
+                                  This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                                  These IPs must correspond to the values of the ipFamilies field. Both
+                                  clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                                  More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              externalIPs:
+                                description: |-
+                                  externalIPs is a list of IP addresses for which nodes in the cluster
+                                  will also accept traffic for this service. These IPs are not managed by
+                                  Kubernetes. The user is responsible for ensuring that traffic arrives
+                                  at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+                                  This field will be wiped when updating a Service to type ExternalName.
+
+                                  This field may hold a maximum of two entries (dual-stack families, in
+                                  either order). These families must correspond to the values of the
+                                  clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                  governed by the ipFamilyPolicy field.
+                                items:
+                                  description: |-
+                                    IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                    to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              ipFamilyPolicy:
+                                description: |-
+                                  IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                  this Service. If there is no value provided, then this field will be set
+                                  to SingleStack. Services can be "SingleStack" (a single IP family),
+                                  "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                  a single IP family on single-stack clusters), or "RequireDualStack"
+                                  (two IP families on dual-stack configured clusters, otherwise fail). The
+                                  ipFamilies and clusterIPs fields depend on the value of this field. This
+                                  field will be wiped when updating a service to type ExternalName.
+                                type: string
+                              loadBalancerClass:
+                                description: |-
+                                  loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                  If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                  e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                  This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                  balancer implementation is used, today this is typically done through the cloud provider integration,
+                                  but should apply for any default implementation. If set, it is assumed that a load balancer
+                                  implementation is watching for Services with a matching class. Any default load balancer
+                                  implementation (e.g. cloud providers) should ignore Services that set this field.
+                                  This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                  Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                type: string
+                              loadBalancerIP:
+                                description: |-
+                                  Only applies to Service Type: LoadBalancer.
+                                  This feature depends on whether the underlying cloud-provider supports specifying
+                                  the loadBalancerIP when a load balancer is created.
+                                  This field will be ignored if the cloud-provider does not support the feature.
+                                  Deprecated: This field was under-specified and its meaning varies across implementations.
+                                  Using it is non-portable and it may not support dual-stack.
+                                  Users are encouraged to use implementation-specific annotations when available.
+                                type: string
+                              loadBalancerSourceRanges:
+                                description: |-
+                                  If specified and supported by the platform, traffic through the cloud-provider
+                                  load-balancer will be restricted to the specified client IPs. This field will be
+                                  ignored if the cloud-provider does not support the feature.
+                                  More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              ports:
+                                description: |-
+                                  The list of ports that are exposed by this service.
+                                  More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                items:
+                                  description: ServicePort contains information
+                                    on service's port.
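+                                # A hedged sketch of exposing the primary through an
+                                # additional LoadBalancer service, per the managed
+                                # services schema above; the service name is
+                                # illustrative, not a default:
+                                #
+                                #   managed:
+                                #     services:
+                                #       additional:
+                                #         - selectorType: rw
+                                #           serviceTemplate:
+                                #             metadata:
+                                #               name: mydb-rw-lb
+                                #             spec:
+                                #               type: LoadBalancer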
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + default: required + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. 
+ type: integer
+ method:
+ description: |-
+ Method to select synchronous replication standbys from the listed
+ servers, accepting 'any' (quorum-based synchronous replication) or
+ 'first' (priority-based synchronous replication) as values.
+ enum:
+ - any
+ - first
+ type: string
+ number:
+ description: |-
+ Specifies the number of synchronous standby servers that
+ transactions must wait for responses from.
+ type: integer
+ x-kubernetes-validations:
+ - message: The number of synchronous replicas should be greater
+ than zero
+ rule: self > 0
+ standbyNamesPost:
+ description: |-
+ A user-defined list of application names to be added to
+ `synchronous_standby_names` after local cluster pods (the order is
+ only useful for priority-based synchronous replication).
+ items:
+ type: string
+ type: array
+ standbyNamesPre:
+ description: |-
+ A user-defined list of application names to be added to
+ `synchronous_standby_names` before local cluster pods (the order is
+ only useful for priority-based synchronous replication).
+ items:
+ type: string
+ type: array
+ required:
+ - method
+ - number
+ type: object
+ x-kubernetes-validations:
+ - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre'
+ and empty 'standbyNamesPost'
+ rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre)
+ || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost)
+ || self.standbyNamesPost.size()==0))
+ type: object
+ primaryUpdateMethod:
+ default: restart
+ description: |-
+ Method to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be with a switchover (`switchover`) or in-place (`restart` - default)
+ enum:
+ - switchover
+ - restart
+ type: string
+ primaryUpdateStrategy:
+ default: unsupervised
+ description: |-
+ Deployment strategy to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be automated (`unsupervised` - default) or manual (`supervised`)
+ enum:
+ - unsupervised
+ - supervised
+ type: string
+ priorityClassName:
+ description: |-
+ Name of the priority class which will be used in every generated Pod. If the
+ specified PriorityClass does not exist, the pod will not be scheduled. Please refer to
+ https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+ for more information
+ type: string
+ probes:
+ description: |-
+ The configuration of the probes to be injected
+ in the PostgreSQL Pods.
+ properties:
+ liveness:
+ description: The liveness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ replica:
+ description: Replica cluster configuration
+ properties:
+ enabled:
+ description: |-
+ If replica mode is enabled, this cluster will be a replica of an
+ existing cluster. A replica cluster can be created from a recovery
+ object store or via streaming through pg_basebackup.
+ Refer to the Replica clusters page of the documentation for more information.
+ type: boolean
+ minApplyDelay:
+ description: |-
+ When replica mode is enabled, this parameter allows you to replay
+ transactions only when the system time is at least the configured
+ time past the commit time. This provides an opportunity to correct
+ data loss errors. Note that when this parameter is set, a promotion
+ token cannot be used.
+ type: string
+ primary:
+ description: |-
+ Primary defines which Cluster is the primary in the distributed PostgreSQL cluster, based on the
+ topology specified in externalClusters
+ type: string
+ promotionToken:
+ description: |-
+ A demotion token generated by an external cluster used to
+ check if the promotion requirements are met.
+ type: string
+ self:
+ description: |-
+ Self defines the name of this cluster. It is used to determine if this is a primary
+ or a replica cluster, comparing it with `primary`
+ type: string
+ source:
+ description: The name of the external cluster which is the replication
+ origin
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
+ replicationSlots:
+ default:
+ highAvailability:
+ enabled: true
+ description: Replication slots management configuration
+ properties:
+ highAvailability:
+ default:
+ enabled: true
+ description: Replication slots for high availability configuration
+ properties:
+ enabled:
+ default: true
+ description: |-
+ If enabled (default), the operator will automatically manage replication slots
+ on the primary instance and use them in streaming replication
+ connections with all the standby instances that are part of the HA
+ cluster. If disabled, the operator will not take advantage
+ of replication slots in streaming connections with the replicas.
+ This feature also controls replication slots in replica clusters,
+ from the designated primary to its cascading replicas.
+ type: boolean
+ slotPrefix:
+ default: _cnpg_
+ description: |-
+ Prefix for replication slots managed by the operator for HA.
+ It may only contain lower case letters, numbers, and the underscore character.
+ This can only be set at creation time. By default set to `_cnpg_`.
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ superuserSecret:
+ description: |-
+ The secret containing the superuser password. If not defined, a new
+ secret will be created with a randomly generated password
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ switchoverDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a primary PostgreSQL instance
+ to gracefully shut down during a switchover.
+ Default value is 3600 seconds (1 hour).
+ format: int32
+ type: integer
+ tablespaces:
+ description: The tablespaces configuration
+ items:
+ description: |-
+ TablespaceConfiguration is the configuration of a tablespace, and includes
+ the storage specification for the tablespace
+ properties:
+ name:
+ description: The name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ properties:
+ name:
+ type: string
+ type: object
+ storage:
+ description: The storage configuration for the tablespace
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent
+ Volume Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to
+ the PersistentVolume backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ temporary:
+ default: false
+ description: |-
+ When set to true, the tablespace will be added as a `temp_tablespaces`
+ entry in PostgreSQL, and will be available to automatically house temp
+ database objects, or other temporary files. Please refer to PostgreSQL
+ documentation for more information on the `temp_tablespaces` GUC.
+ type: boolean
+ required:
+ - name
+ - storage
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching
+ pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName;
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs; defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class.
+ type: string
+ type: object
+ required:
+ - instances
+ type: object
+ x-kubernetes-validations:
+ - message: imageName and imageCatalogRef are mutually exclusive
+ rule: '!(has(self.imageCatalogRef) && has(self.imageName))'
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ availableArchitectures:
+ description: AvailableArchitectures reports the available architectures
+ of a cluster
+ items:
+ description: AvailableArchitecture represents the state of a cluster's
+ architecture
+ properties:
+ goArch:
+ description: GoArch is the name of the executable architecture
+ type: string
+ hash:
+ description: Hash is the hash of the executable
+ type: string
+ required:
+ - goArch
+ - hash
+ type: object
+ type: array
+ azurePVCUpdateEnabled:
+ description: AzurePVCUpdateEnabled shows if the PVC online upgrade
+ is enabled for this cluster
+ type: boolean
+ certificates:
+ description: The configuration for the CA and related certificates,
+ initialized with defaults.
+ properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ expirations:
+ additionalProperties:
+ type: string
+ description: Expiration dates for all certificates.
+ type: object
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs; if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must also provide `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+ description: The commit hash of the operator that is running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the binary of the operator
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + majorVersionUpgradeFromImage: + description: |- + MajorVersionUpgradeFromImage contains the image that was + running before the major version upgrade started. + type: string + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name 
is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. 
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance; this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ a shared-nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted.
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
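+ # A minimal, hand-written Database example against this schema (the cluster,
+ # database, and owner names are hypothetical):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Database
+ #   metadata:
+ #     name: cluster-example-app
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     name: app
+ #     owner: app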
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
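+ # An illustrative, hand-written locale configuration for a Database object;
+ # the values are hypothetical and, per the fields above, immutable once set:
+ #
+ #   spec:
+ #     encoding: UTF8
+ #     localeProvider: icu
+ #     icuLocale: en-US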
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ applied:
+ description: Applied is true if the database was reconciled correctly
+ type: boolean
+ extensions:
+ description: Extensions is the status of the managed extensions
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ schemas:
+ description: Schemas is the status of the managed schemas
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.2
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
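+ # A hand-written ImageCatalog sketch satisfying this schema, one entry per
+ # unique major version (the image references are illustrative):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql
+ #   spec:
+ #     images:
+ #     - major: 16
+ #       image: ghcr.io/cloudnative-pg/postgresql:16.4
+ #     - major: 17
+ #       image: ghcr.io/cloudnative-pg/postgresql:17.0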
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
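+ # A hand-written example of a relabeling that could be set here, dropping a
+ # hypothetical label before scraping (standard Prometheus RelabelConfig
+ # semantics):
+ #
+ #   monitoring:
+ #     enablePodMonitor: true
+ #     podMonitorRelabelings:
+ #     - action: labeldrop
+ #       regex: pod_template_hash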
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
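+ # A hand-written sketch of a Pooler that pauses client connections through
+ # this field (names are hypothetical; the operator issues PAUSE/RESUME
+ # against PgBouncer under the hood):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Pooler
+ #   metadata:
+ #     name: pooler-example-rw
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     instances: 1
+ #     type: rw
+ #     pgbouncer:
+ #       poolMode: session
+ #       paused: true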
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly.
If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local.
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+                          implementation is watching for Services with a matching class. Any default load balancer
+                          implementation (e.g. cloud providers) should ignore Services that set this field.
+                          This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                          Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                        type: string
+                      loadBalancerIP:
+                        description: |-
+                          Only applies to Service Type: LoadBalancer.
+                          This feature depends on whether the underlying cloud-provider supports specifying
+                          the loadBalancerIP when a load balancer is created.
+                          This field will be ignored if the cloud-provider does not support the feature.
+                          Deprecated: This field was under-specified and its meaning varies across implementations.
+                          Using it is non-portable and it may not support dual-stack.
+                          Users are encouraged to use implementation-specific annotations when available.
+                        type: string
+                      loadBalancerSourceRanges:
+                        description: |-
+                          If specified and supported by the platform, traffic through the cloud-provider
+                          load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                          cloud-provider does not support the feature.
+                          More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      ports:
+                        description: |-
+                          The list of ports that are exposed by this service.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        items:
+                          description: ServicePort contains information on service's
+                            port.
+                          properties:
+                            appProtocol:
+                              description: |-
+                                The application protocol for this port.
+                                This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                                This field follows standard Kubernetes label syntax.
+                                Valid values are either:
+
+                                * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                                RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                                * Kubernetes-defined prefixed names:
+                                  * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                                  * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                                  * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                                * Other protocols should use implementation-defined prefixed names such as
+                                mycompany.com/my-custom-protocol.
+                              type: string
+                            name:
+                              description: |-
+                                The name of this port within the service. This must be a DNS_LABEL.
+                                All ports within a ServiceSpec must have unique names. When considering
+                                the endpoints for a Service, this must match the 'name' field in the
+                                EndpointPort.
+                                Optional if only one ServicePort is defined on this service.
+                              type: string
+                            nodePort:
+                              description: |-
+                                The port on each node on which this service is exposed when type is
+                                NodePort or LoadBalancer. Usually assigned by the system. If a value is
+                                specified, in-range, and not in use it will be used, otherwise the
+                                operation will fail. If not specified, a port will be allocated if this
+                                Service requires one. If this field is specified when creating a
+                                Service which does not need it, creation will fail. This field will be
+                                wiped when updating a Service to no longer need it (e.g. 
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+                                    for each node that meets all of the scheduling requirements (resource
+                                    request, requiredDuringScheduling affinity expressions, etc.),
+                                    compute a sum by iterating through the elements of this field and adding
+                                    "weight" to the sum if the node matches the corresponding matchExpressions; the
+                                    node(s) with the highest sum are the most preferred.
+                                  items:
+                                    description: |-
+                                      An empty preferred scheduling term matches all objects with implicit weight 0
+                                      (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                                    properties:
+                                      preference:
+                                        description: A node selector term, associated
+                                          with the corresponding weight.
+                                        properties:
+                                          matchExpressions:
+                                            description: A list of node selector requirements
+                                              by node's labels.
+                                            items:
+                                              description: |-
+                                                A node selector requirement is a selector that contains values, a key, and an operator
+                                                that relates the key and values.
+                                              properties:
+                                                key:
+                                                  description: The label key that the
+                                                    selector applies to.
+                                                  type: string
+                                                operator:
+                                                  description: |-
+                                                    Represents a key's relationship to a set of values.
+                                                    Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                  type: string
+                                                values:
+                                                  description: |-
+                                                    An array of string values. If the operator is In or NotIn,
+                                                    the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                    the values array must be empty. If the operator is Gt or Lt, the values
+                                                    array must have a single element, which will be interpreted as an integer.
+                                                    This array is replaced during a strategic merge patch.
+                                                  items:
+                                                    type: string
+                                                  type: array
+                                                  x-kubernetes-list-type: atomic
+                                              required:
+                                              - key
+                                              - operator
+                                              type: object
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                          matchFields:
+                                            description: A list of node selector requirements
+                                              by node's fields.
+                                            items:
+                                              description: |-
+                                                A node selector requirement is a selector that contains values, a key, and an operator
+                                                that relates the key and values.
+                                              properties:
+                                                key:
+                                                  description: The label key that the
+                                                    selector applies to.
+                                                  type: string
+                                                operator:
+                                                  description: |-
+                                                    Represents a key's relationship to a set of values.
+                                                    Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                  type: string
+                                                values:
+                                                  description: |-
+                                                    An array of string values. If the operator is In or NotIn,
+                                                    the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                    the values array must be empty. If the operator is Gt or Lt, the values
+                                                    array must have a single element, which will be interpreted as an integer.
+                                                    This array is replaced during a strategic merge patch.
+                                                  items:
+                                                    type: string
+                                                  type: array
+                                                  x-kubernetes-list-type: atomic
+                                              required:
+                                              - key
+                                              - operator
+                                              type: object
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      weight:
+                                        description: Weight associated with matching
+                                          the corresponding nodeSelectorTerm, in the
+                                          range 1-100.
+                                        format: int32
+                                        type: integer
+                                    required:
+                                    - preference
+                                    - weight
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                requiredDuringSchedulingIgnoredDuringExecution:
+                                  description: |-
+                                    If the affinity requirements specified by this field are not met at
+                                    scheduling time, the pod will not be scheduled onto the node.
+                                    If the affinity requirements specified by this field cease to be met
+                                    at some point during pod execution (e.g. due to an update), the system
+                                    may or may not try to eventually evict the pod from its node.
+                                  properties:
+                                    nodeSelectorTerms:
+                                      description: Required. 
A list of node selector
+                                        terms. The terms are ORed.
+                                      items:
+                                        description: |-
+                                          A null or empty node selector term matches no objects. The requirements of
+                                          them are ANDed.
+                                          The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                                        properties:
+                                          matchExpressions:
+                                            description: A list of node selector requirements
+                                              by node's labels.
+                                            items:
+                                              description: |-
+                                                A node selector requirement is a selector that contains values, a key, and an operator
+                                                that relates the key and values.
+                                              properties:
+                                                key:
+                                                  description: The label key that the
+                                                    selector applies to.
+                                                  type: string
+                                                operator:
+                                                  description: |-
+                                                    Represents a key's relationship to a set of values.
+                                                    Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                  type: string
+                                                values:
+                                                  description: |-
+                                                    An array of string values. If the operator is In or NotIn,
+                                                    the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                    the values array must be empty. If the operator is Gt or Lt, the values
+                                                    array must have a single element, which will be interpreted as an integer.
+                                                    This array is replaced during a strategic merge patch.
+                                                  items:
+                                                    type: string
+                                                  type: array
+                                                  x-kubernetes-list-type: atomic
+                                              required:
+                                              - key
+                                              - operator
+                                              type: object
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                          matchFields:
+                                            description: A list of node selector requirements
+                                              by node's fields.
+                                            items:
+                                              description: |-
+                                                A node selector requirement is a selector that contains values, a key, and an operator
+                                                that relates the key and values.
+                                              properties:
+                                                key:
+                                                  description: The label key that the
+                                                    selector applies to.
+                                                  type: string
+                                                operator:
+                                                  description: |-
+                                                    Represents a key's relationship to a set of values.
+                                                    Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                  type: string
+                                                values:
+                                                  description: |-
+                                                    An array of string values. If the operator is In or NotIn,
+                                                    the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                    the values array must be empty. If the operator is Gt or Lt, the values
+                                                    array must have a single element, which will be interpreted as an integer.
+                                                    This array is replaced during a strategic merge patch.
+                                                  items:
+                                                    type: string
+                                                  type: array
+                                                  x-kubernetes-list-type: atomic
+                                              required:
+                                              - key
+                                              - operator
+                                              type: object
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  required:
+                                  - nodeSelectorTerms
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                              type: object
+                            podAffinity:
+                              description: Describes pod affinity scheduling rules (e.g.
+                                co-locate this pod in the same node, zone, etc. as some
+                                other pod(s)).
+                              properties:
+                                preferredDuringSchedulingIgnoredDuringExecution:
+                                  description: |-
+                                    The scheduler will prefer to schedule pods to nodes that satisfy
+                                    the affinity expressions specified by this field, but it may choose
+                                    a node that violates one or more of the expressions. The node that is
+                                    most preferred is the one with the greatest sum of weights, i.e.
+                                    for each node that meets all of the scheduling requirements (resource
+                                    request, requiredDuringScheduling affinity expressions, etc.),
+                                    compute a sum by iterating through the elements of this field and adding
+                                    "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+                                    node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+                                  items:
+                                    description: |-
+                                      Defines a set of pods (namely those matching the labelSelector
+                                      relative to the given namespace(s)) that this pod should be
+                                      co-located (affinity) or not co-located (anti-affinity) with,
+                                      where co-located is defined as running on a node whose value of
+                                      the label with key <topologyKey> matches that of any node on which
+                                      a pod of the set of pods is running
+                                    properties:
+                                      labelSelector:
+                                        description: |-
+                                          A label query over a set of resources, in this case pods.
+                                          If it's null, this PodAffinityTerm matches with no Pods.
+                                        properties:
+                                          matchExpressions:
+                                            description: matchExpressions is a list
+                                              of label selector requirements. The requirements
+                                              are ANDed.
+                                            items:
+                                              description: |-
+                                                A label selector requirement is a selector that contains values, a key, and an operator that
+                                                relates the key and values.
+                                              properties:
+                                                key:
+                                                  description: key is the label key
+                                                    that the selector applies to.
+                                                  type: string
+                                                operator:
+                                                  description: |-
+                                                    operator represents a key's relationship to a set of values.
+                                                    Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                  type: string
+                                                values:
+                                                  description: |-
+                                                    values is an array of string values. If the operator is In or NotIn,
+                                                    the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                    the values array must be empty. This array is replaced during a strategic
+                                                    merge patch.
+                                                  items:
+                                                    type: string
+                                                  type: array
+                                                  x-kubernetes-list-type: atomic
+                                              required:
+                                              - key
+                                              - operator
+                                              type: object
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                          matchLabels:
+                                            additionalProperties:
+                                              type: string
+                                            description: |-
+                                              matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                              map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                              operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                            type: object
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      matchLabelKeys:
+                                        description: |-
+                                          MatchLabelKeys is a set of pod label keys to select which pods will
+                                          be taken into consideration. The keys are used to lookup values from the
+                                          incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                          to select the group of existing pods which pods will be taken into consideration
+                                          for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                          pod labels will be ignored. The default value is empty.
+                                          The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                          Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                          This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                      mismatchLabelKeys:
+                                        description: |-
+                                          MismatchLabelKeys is a set of pod label keys to select which pods will
+                                          be taken into consideration. The keys are used to lookup values from the
+                                          incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                          to select the group of existing pods which pods will be taken into consideration
+                                          for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                          pod labels will be ignored. The default value is empty.
+                                          The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+                                          Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+                                          This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+                                    for each node that meets all of the scheduling requirements (resource
+                                    request, requiredDuringScheduling anti-affinity expressions, etc.),
+                                    compute a sum by iterating through the elements of this field and adding
+                                    "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+                                    node(s) with the highest sum are the most preferred.
+                                  items:
+                                    description: The weights of all of the matched WeightedPodAffinityTerm
+                                      fields are added per-node to find the most preferred
+                                      node(s)
+                                    properties:
+                                      podAffinityTerm:
+                                        description: Required. A pod affinity term,
+                                          associated with the corresponding weight.
+                                        properties:
+                                          labelSelector:
+                                            description: |-
+                                              A label query over a set of resources, in this case pods.
+                                              If it's null, this PodAffinityTerm matches with no Pods.
+                                            properties:
+                                              matchExpressions:
+                                                description: matchExpressions is a list
+                                                  of label selector requirements. The
+                                                  requirements are ANDed.
+                                                items:
+                                                  description: |-
+                                                    A label selector requirement is a selector that contains values, a key, and an operator that
+                                                    relates the key and values.
+                                                  properties:
+                                                    key:
+                                                      description: key is the label
+                                                        key that the selector applies
+                                                        to.
+                                                      type: string
+                                                    operator:
+                                                      description: |-
+                                                        operator represents a key's relationship to a set of values.
+                                                        Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                      type: string
+                                                    values:
+                                                      description: |-
+                                                        values is an array of string values. If the operator is In or NotIn,
+                                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                        the values array must be empty. This array is replaced during a strategic
+                                                        merge patch.
+                                                      items:
+                                                        type: string
+                                                      type: array
+                                                      x-kubernetes-list-type: atomic
+                                                  required:
+                                                  - key
+                                                  - operator
+                                                  type: object
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                              matchLabels:
+                                                additionalProperties:
+                                                  type: string
+                                                description: |-
+                                                  matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                                  map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                                  operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                                type: object
+                                            type: object
+                                            x-kubernetes-map-type: atomic
+                                          matchLabelKeys:
+                                            description: |-
+                                              MatchLabelKeys is a set of pod label keys to select which pods will
+                                              be taken into consideration. The keys are used to lookup values from the
+                                              incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                              to select the group of existing pods which pods will be taken into consideration
+                                              for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                              pod labels will be ignored. The default value is empty.
+                                              The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                              Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                              This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                          mismatchLabelKeys:
+                                            description: |-
+                                              MismatchLabelKeys is a set of pod label keys to select which pods will
+                                              be taken into consideration. The keys are used to lookup values from the
+                                              incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                              to select the group of existing pods which pods will be taken into consideration
+                                              for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                              pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+                                    system may or may not try to eventually evict the pod from its node.
+                                    When there are multiple elements, the lists of nodes corresponding to each
+                                    podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                                  items:
+                                    description: |-
+                                      Defines a set of pods (namely those matching the labelSelector
+                                      relative to the given namespace(s)) that this pod should be
+                                      co-located (affinity) or not co-located (anti-affinity) with,
+                                      where co-located is defined as running on a node whose value of
+                                      the label with key <topologyKey> matches that of any node on which
+                                      a pod of the set of pods is running
+                                    properties:
+                                      labelSelector:
+                                        description: |-
+                                          A label query over a set of resources, in this case pods.
+                                          If it's null, this PodAffinityTerm matches with no Pods.
+                                        properties:
+                                          matchExpressions:
+                                            description: matchExpressions is a list
+                                              of label selector requirements. The requirements
+                                              are ANDed.
+                                            items:
+                                              description: |-
+                                                A label selector requirement is a selector that contains values, a key, and an operator that
+                                                relates the key and values.
+                                              properties:
+                                                key:
+                                                  description: key is the label key
+                                                    that the selector applies to.
+                                                  type: string
+                                                operator:
+                                                  description: |-
+                                                    operator represents a key's relationship to a set of values.
+                                                    Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                  type: string
+                                                values:
+                                                  description: |-
+                                                    values is an array of string values. If the operator is In or NotIn,
+                                                    the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                    the values array must be empty. This array is replaced during a strategic
+                                                    merge patch.
+                                                  items:
+                                                    type: string
+                                                  type: array
+                                                  x-kubernetes-list-type: atomic
+                                              required:
+                                              - key
+                                              - operator
+                                              type: object
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                          matchLabels:
+                                            additionalProperties:
+                                              type: string
+                                            description: |-
+                                              matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                              map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                              operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                            type: object
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      matchLabelKeys:
+                                        description: |-
+                                          MatchLabelKeys is a set of pod label keys to select which pods will
+                                          be taken into consideration. The keys are used to lookup values from the
+                                          incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                          to select the group of existing pods which pods will be taken into consideration
+                                          for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                          pod labels will be ignored. The default value is empty.
+                                          The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                          Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                          This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                      mismatchLabelKeys:
+                                        description: |-
+                                          MismatchLabelKeys is a set of pod label keys to select which pods will
+                                          be taken into consideration. The keys are used to lookup values from the
+                                          incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                          to select the group of existing pods which pods will be taken into consideration
+                                          for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                          pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+                                The container image's CMD is used if this is not provided.
+                                Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                                cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                                produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                                of whether the variable exists or not. Cannot be updated.
+                                More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            command:
+                              description: |-
+                                Entrypoint array. Not executed within a shell.
+                                The container image's ENTRYPOINT is used if this is not provided.
+                                Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                                cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                                produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                                of whether the variable exists or not. Cannot be updated.
+                                More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            env:
+                              description: |-
+                                List of environment variables to set in the container.
+                                Cannot be updated.
+                              items:
+                                description: EnvVar represents an environment variable
+                                  present in a Container.
+                                properties:
+                                  name:
+                                    description: Name of the environment variable.
+                                      Must be a C_IDENTIFIER.
+                                    type: string
+                                  value:
+                                    description: |-
+                                      Variable references $(VAR_NAME) are expanded
+                                      using the previously defined environment variables in the container and
+                                      any service environment variables. If a variable cannot be resolved,
+                                      the reference in the input string will be unchanged. Double $$ are reduced
+                                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                      "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                      Escaped references will never be expanded, regardless of whether the variable
+                                      exists or not.
+                                      Defaults to "".
+                                    type: string
+                                  valueFrom:
+                                    description: Source for the environment variable's
+                                      value. Cannot be used if value is not empty.
+                                    properties:
+                                      configMapKeyRef:
+                                        description: Selects a key of a ConfigMap.
+                                        properties:
+                                          key:
+                                            description: The key to select.
+                                            type: string
+                                          name:
+                                            default: ""
+                                            description: |-
+                                              Name of the referent.
+                                              This field is effectively required, but due to backwards compatibility is
+                                              allowed to be empty. Instances of this type with an empty value here are
+                                              almost certainly wrong.
+                                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                            type: string
+                                          optional:
+                                            description: Specify whether the ConfigMap
+                                              or its key must be defined
+                                            type: boolean
+                                        required:
+                                        - key
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      fieldRef:
+                                        description: |-
+                                          Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                          spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                        properties:
+                                          apiVersion:
+                                            description: Version of the schema the
+                                              FieldPath is written in terms of, defaults
+                                              to "v1". 
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+                            properties:
+                              localhostProfile:
+                                description: |-
+                                  localhostProfile indicates a profile loaded on the node that should be used.
+                                  The profile must be preconfigured on the node to work.
+                                  Must match the loaded name of the profile.
+                                  Must be set if and only if type is "Localhost".
+                                type: string
+                              type:
+                                description: |-
+                                  type indicates which kind of AppArmor profile will be applied.
+                                  Valid options are:
+                                  Localhost - a profile pre-loaded on the node.
+                                  RuntimeDefault - the container runtime's default profile.
+                                  Unconfined - no AppArmor enforcement.
+                                type: string
+                            required:
+                            - type
+                            type: object
+                          capabilities:
+                            description: |-
+                              The capabilities to add/drop when running containers.
+                              Defaults to the default set of capabilities granted by the container runtime.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            properties:
+                              add:
+                                description: Added capabilities
+                                items:
+                                  description: Capability represents a POSIX
+                                    capability type
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              drop:
+                                description: Removed capabilities
+                                items:
+                                  description: Capability represents a POSIX
+                                    capability type
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            type: object
+                          privileged:
+                            description: |-
+                              Run container in privileged mode.
+                              Processes in privileged containers are essentially equivalent to root on the host.
+                              Defaults to false.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            type: boolean
+                          procMount:
+                            description: |-
+                              procMount denotes the type of proc mount to use for the containers.
+                              The default value is Default which uses the container runtime defaults for
+                              readonly paths and masked paths.
+                              This requires the ProcMountType feature flag to be enabled.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            type: string
+                          readOnlyRootFilesystem:
+                            description: |-
+                              Whether this container has a read-only root filesystem.
+                              Default is false.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            type: boolean
+                          runAsGroup:
+                            description: |-
+                              The GID to run the entrypoint of the container process.
+                              Uses runtime default if unset.
+                              May also be set in PodSecurityContext. If set in both SecurityContext and
+                              PodSecurityContext, the value specified in SecurityContext takes precedence.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            format: int64
+                            type: integer
+                          runAsNonRoot:
+                            description: |-
+                              Indicates that the container must run as a non-root user.
+                              If true, the Kubelet will validate the image at runtime to ensure that it
+                              does not run as UID 0 (root) and fail to start the container if it does.
+                              If unset or false, no such validation will be performed.
+                              May also be set in PodSecurityContext. If set in both SecurityContext and
+                              PodSecurityContext, the value specified in SecurityContext takes precedence.
+                            type: boolean
+                          runAsUser:
+                            description: |-
+                              The UID to run the entrypoint of the container process.
+                              Defaults to user specified in image metadata if unspecified.
+                              May also be set in PodSecurityContext. If set in both SecurityContext and
+                              PodSecurityContext, the value specified in SecurityContext takes precedence.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            format: int64
+                            type: integer
+                          seLinuxOptions:
+                            description: |-
+                              The SELinux context to be applied to the container.
+                              If unspecified, the container runtime will allocate a random SELinux context for each
+                              container. May also be set in PodSecurityContext.
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+                            format: int32
+                            type: integer
+                          tcpSocket:
+                            description: TCPSocket specifies a connection to
+                              a TCP port.
+                            properties:
+                              host:
+                                description: 'Optional: Host name to connect
+                                  to, defaults to the pod IP.'
+                                type: string
+                              port:
+                                anyOf:
+                                - type: integer
+                                - type: string
+                                description: |-
+                                  Number or name of the port to access on the container.
+                                  Number must be in the range 1 to 65535.
+                                  Name must be an IANA_SVC_NAME.
+                                x-kubernetes-int-or-string: true
+                            required:
+                            - port
+                            type: object
+                          terminationGracePeriodSeconds:
+                            description: |-
+                              Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                              The grace period is the duration in seconds after the processes running in the pod are sent
+                              a termination signal and the time when the processes are forcibly halted with a kill signal.
+                              Set this value longer than the expected cleanup time for your process.
+                              If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                              value overrides the value provided by the pod spec.
+                              Value must be a non-negative integer. The value zero indicates stop immediately via
+                              the kill signal (no opportunity to shut down).
+                              This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                              Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                            format: int64
+                            type: integer
+                          timeoutSeconds:
+                            description: |-
+                              Number of seconds after which the probe times out.
+                              Defaults to 1 second. Minimum value is 1.
+                              More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                            format: int32
+                            type: integer
+                        type: object
+                      stdin:
+                        description: |-
+                          Whether this container should allocate a buffer for stdin in the container runtime. If this
+                          is not set, reads from stdin in the container will always result in EOF.
+                          Default is false.
+                        type: boolean
+                      stdinOnce:
+                        description: |-
+                          Whether the container runtime should close the stdin channel after it has been opened by
+                          a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                          sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                          first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                          at which time stdin is closed and remains closed until the container is restarted. If this
+                          flag is false, a container process that reads from stdin will never receive an EOF.
+                          Default is false.
+                        type: boolean
+                      terminationMessagePath:
+                        description: |-
+                          Optional: Path at which the file to which the container's termination message
+                          will be written is mounted into the container's filesystem.
+                          Message written is intended to be brief final status, such as an assertion failure message.
+                          Will be truncated by the node if greater than 4096 bytes. The total message length across
+                          all containers will be limited to 12kb.
+                          Defaults to /dev/termination-log.
+                          Cannot be updated.
+                        type: string
+                      terminationMessagePolicy:
+                        description: |-
+                          Indicate how the termination message should be populated. File will use the contents of
+                          terminationMessagePath to populate the container status message on both success and failure.
+                          FallbackToLogsOnError will use the last chunk of container log output if the termination
+                          message file is empty and the container exited with an error.
+                          The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                          Defaults to File.
+                          Cannot be updated.
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
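The container-level `securityContext` fields documented here override their pod-level counterparts. A minimal hardened configuration, shown only to illustrate how the fields above combine (the values are common hardening practice, not defaults of this schema):

```yaml
securityContext:
  allowPrivilegeEscalation: false   # sets no_new_privs on the container process
  readOnlyRootFilesystem: true
  runAsNonRoot: true                # kubelet refuses to start a UID 0 image
  capabilities:
    drop: ["ALL"]                   # drop every POSIX capability
  seccompProfile:
    type: RuntimeDefault            # container runtime's default seccomp profile
```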
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                              first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                              at which time stdin is closed and remains closed until the container is restarted. If this
+                              flag is false, a container process that reads from stdin will never receive an EOF.
+                              Default is false.
+                            type: boolean
+                          targetContainerName:
+                            description: |-
+                              If set, the name of the container from PodSpec that this ephemeral container targets.
+                              The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+                              If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+                              The container runtime must implement support for this feature. If the runtime does not
+                              support namespace targeting then the result of setting this field is undefined.
+                            type: string
+                          terminationMessagePath:
+                            description: |-
+                              Optional: Path at which the file to which the container's termination message
+                              will be written is mounted into the container's filesystem.
+                              Message written is intended to be brief final status, such as an assertion failure message.
+                              Will be truncated by the node if greater than 4096 bytes. The total message length across
+                              all containers will be limited to 12kb.
+                              Defaults to /dev/termination-log.
+                              Cannot be updated.
+                            type: string
+                          terminationMessagePolicy:
+                            description: |-
+                              Indicate how the termination message should be populated. File will use the contents of
+                              terminationMessagePath to populate the container status message on both success and failure.
+                              FallbackToLogsOnError will use the last chunk of container log output if the termination
+                              message file is empty and the container exited with an error.
+                              The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                              Defaults to File.
+                              Cannot be updated.
+                            type: string
+                          tty:
+                            description: |-
+                              Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                              Default is false.
+                            type: boolean
+                          volumeDevices:
+                            description: volumeDevices is the list of block devices
+                              to be used by the container.
+                            items:
+                              description: volumeDevice describes a mapping of a
+                                raw block device within a container.
+                              properties:
+                                devicePath:
+                                  description: devicePath is the path inside of
+                                    the container that the device will be mapped
+                                    to.
+                                  type: string
+                                name:
+                                  description: name must match the name of a persistentVolumeClaim
+                                    in the pod
+                                  type: string
+                              required:
+                              - devicePath
+                              - name
+                              type: object
+                            type: array
+                            x-kubernetes-list-map-keys:
+                            - devicePath
+                            x-kubernetes-list-type: map
+                          volumeMounts:
+                            description: |-
+                              Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+                              Cannot be updated.
+                            items:
+                              description: VolumeMount describes a mounting of a
+                                Volume within a container.
+                              properties:
+                                mountPath:
+                                  description: |-
+                                    Path within the container at which the volume should be mounted. Must
+                                    not contain ':'.
+                                  type: string
+                                mountPropagation:
+                                  description: |-
+                                    mountPropagation determines how mounts are propagated from the host
+                                    to container and the other way around.
+                                    When not set, MountPropagationNone is used.
+                                    This field is beta in 1.10.
+                                    When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+                                    (which defaults to None).
+                                  type: string
+                                name:
+                                  description: This must match the Name of a Volume.
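In practice the ephemeral-container fields above, in particular `targetContainerName`, are filled in by `kubectl debug` rather than written by hand. A hypothetical entry as it would appear in the pod spec after running `kubectl debug -it my-pod --image=busybox --target=postgres` (pod, image, and container names are made up):

```yaml
ephemeralContainers:
  - name: debugger-abc12            # name generated by kubectl debug
    image: busybox                  # hypothetical debug image
    targetContainerName: postgres   # share this container's IPC/PID namespaces
    stdin: true
    tty: true
```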
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+                              mitigating container breakout vulnerabilities while still allowing users to run their
+                              containers as root without actually having root privileges on the host.
+                              This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+                            type: boolean
+                          hostname:
+                            description: |-
+                              Specifies the hostname of the Pod.
+                              If not specified, the pod's hostname will be set to a system-defined value.
+                            type: string
+                          imagePullSecrets:
+                            description: |-
+                              ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+                              If specified, these secrets will be passed to individual puller implementations for them to use.
+                              More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+                            items:
+                              description: |-
+                                LocalObjectReference contains enough information to let you locate the
+                                referenced object inside the same namespace.
+                              properties:
+                                name:
+                                  default: ""
+                                  description: |-
+                                    Name of the referent.
+                                    This field is effectively required, but due to backwards compatibility is
+                                    allowed to be empty. Instances of this type with an empty value here are
+                                    almost certainly wrong.
+                                    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                  type: string
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            type: array
+                            x-kubernetes-list-map-keys:
+                            - name
+                            x-kubernetes-list-type: map
+                          initContainers:
+                            description: |-
+                              List of initialization containers belonging to the pod.
+                              Init containers are executed in order prior to containers being started. If any
+                              init container fails, the pod is considered to have failed and is handled according
+                              to its restartPolicy. The name for an init container or normal container must be
+                              unique among all containers.
+                              Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+                              The resourceRequirements of an init container are taken into account during scheduling
+                              by finding the highest request/limit for each resource type, and then using the max of
+                              that value or the sum of the normal containers. Limits are applied to init containers
+                              in a similar fashion.
+                              Init containers cannot currently be added or removed.
+                              Cannot be updated.
+                              More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+                            items:
+                              description: A single application container that you want
+                                to run within a pod.
+                              properties:
+                                args:
+                                  description: |-
+                                    Arguments to the entrypoint.
+                                    The container image's CMD is used if this is not provided.
+                                    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                                    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                                    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                                    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                                    of whether the variable exists or not. Cannot be updated.
+                                    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                command:
+                                  description: |-
+                                    Entrypoint array. Not executed within a shell.
+                                    The container image's ENTRYPOINT is used if this is not provided.
+                                    Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
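The `env` schema above supports literal values, the `$$` escaping rule, and several indirect sources. An illustrative set of entries (variable names are made up) covering `fieldRef`, `resourceFieldRef`, and escaping:

```yaml
env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name    # downward API: the pod's own name
  - name: MEMORY_LIMIT_MIB
    valueFrom:
      resourceFieldRef:
        resource: limits.memory
        divisor: 1Mi                # report the limit in MiB
  - name: ESCAPED
    value: "$$(POD_NAME)"           # yields the literal string "$(POD_NAME)"
```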
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
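Taken together, the probe fields above determine how quickly the kubelet reacts: probing starts after `initialDelaySeconds`, runs every `periodSeconds`, each attempt may take up to `timeoutSeconds`, and action is taken only after `failureThreshold` consecutive failures. An illustrative liveness probe (endpoint and port are made up) that restarts a hung container roughly 30 seconds after it stops answering:

```yaml
livenessProbe:
  httpGet:
    path: /healthz          # hypothetical health endpoint
    port: 8080
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 1
  failureThreshold: 3       # 3 failures x 10s period ~= 30s to restart
```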
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
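The limits and requests maps accept Kubernetes quantities; the pattern above admits plain integers, decimal suffixes such as m, and binary suffixes such as Mi. A small sizing sketch, with invented values:

  resources:
    requests:
      cpu: 500m                                    # half a CPU
      memory: 512Mi
    limits:
      memory: 512Mi                                # requests cannot exceed limits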
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
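The restartPolicy text above describes the sidecar pattern: an init container with restartPolicy: Always keeps running alongside the regular containers instead of gating them. A minimal sketch, with invented names:

  initContainers:
    - name: log-shipper                            # hypothetical sidecar container
      image: registry.example.com/shipper:latest   # hypothetical image
      restartPolicy: Always                        # the only value allowed here; marks a sidecar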
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container.
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
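Taken together, the container security fields above permit a fairly locked-down configuration. One common hardened combination, shown purely as an illustration:

  securityContext:
    runAsNonRoot: true
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop: ["ALL"]                                # drop every POSIX capability
    seccompProfile:
      type: RuntimeDefault                         # container runtime's default profile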
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
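Because a startup probe suppresses the other probes until it first succeeds, it is usually given a generous failureThreshold. An illustrative sketch; the port is an assumption:

  startupProbe:
    tcpSocket:
      port: 5432                                   # hypothetical TCP port
    periodSeconds: 10
    failureThreshold: 30                           # up to ~300s before the kubelet restarts the container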
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
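Each volumeMount must name a volume declared at the pod level, and the list whose item schema follows is keyed by mountPath. For example, with invented names:

  volumeMounts:
    - name: scratch                                # must match a pod-level volume name
      mountPath: /var/lib/scratch                  # must not contain ':'
      readOnly: false                              # schema default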
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+ If the OS field is set to linux, the following fields must be unset:
+ - securityContext.windowsOptions
+
+ If the OS field is set to windows, the following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.appArmorProfile
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.securityContext.supplementalGroupsPolicy
+ - spec.containers[*].securityContext.appArmorProfile
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+ Additional values may be defined in the future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
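A scheduling gate keeps the pod in the SchedulingGated state until every gate is removed; gates can only be set at creation time. A sketch with a made-up gate name:

  schedulingGates:
    - name: example.com/wait-for-capacity          # hypothetical gate; remove it to release the pod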
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxChangePolicy:
+ description: |-
+ seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+ It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+ Valid values are "MountOption" and "Recursive".
+
+ "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+ This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+
+ "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+ This requires all Pods that share the same volume to use the same SELinux label.
+ It is not possible to share the same volume among privileged and unprivileged Pods.
+ Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ CSIDriver instance. Other volumes are always re-labelled recursively.
+ "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+
+ If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+ If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+ and "Recursive" for all other volumes.
+
+ This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+
+ All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
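At the pod level, fsGroup and the seccomp profile apply to all containers unless a container's own securityContext overrides them. A hedged example with invented IDs:

  securityContext:
    runAsNonRoot: true
    fsGroup: 10001                                 # hypothetical GID that will own pod volumes
    fsGroupChangePolicy: OnRootMismatch            # skip the recursive chown when ownership already matches
    seccompProfile:
      type: RuntimeDefault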
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
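A toleration matches a node taint by key, value, and effect. To let a pod run on nodes tainted with, say, dedicated=postgres:NoSchedule (an invented taint), one could write:

  tolerations:
    - key: dedicated                               # hypothetical taint key
      operator: Equal
      value: postgres
      effect: NoSchedule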
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
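Mapping the maxSkew, topologyKey, and whenUnsatisfiable trio above onto a concrete constraint, with an invented pod label, gives something like:

  topologySpreadConstraints:
    - maxSkew: 1                                   # at most one pod of imbalance between zones
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule             # ScheduleAnyway would make this a soft preference
      labelSelector:
        matchLabels:
          app: example                             # hypothetical label carried by the spread pods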
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
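# A commented sketch, for illustration only, of a configMap volume and an inline csi volume
# as they might appear under the pod template's `volumes`; the ConfigMap name and CSI driver
# are hypothetical:
#
#   volumes:
#     - name: app-config
#       configMap:
#         name: pooler-extra-config        # hypothetical ConfigMap
#         defaultMode: 0440
#         items:
#           - key: custom.ini
#             path: custom.ini
#     - name: inline-csi
#       csi:
#         driver: csi.example.com          # hypothetical CSI driver
#         readOnly: true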
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
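# For illustration, a downwardAPI volume exposing pod labels and a container CPU limit as
# files; the container name is an assumption:
#
#   volumes:
#     - name: pod-info
#       downwardAPI:
#         items:
#           - path: labels
#             fieldRef:
#               fieldPath: metadata.labels
#           - path: cpu_limit
#             resourceFieldRef:
#               containerName: pgbouncer    # assumed container name
#               resource: limits.cpu
#               divisor: 1m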
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
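# As a sketch of the dataSourceRef semantics above, a claim pre-populated from an existing
# VolumeSnapshot; the snapshot name is hypothetical:
#
#   dataSourceRef:
#     apiGroup: snapshot.storage.k8s.io
#     kind: VolumeSnapshot
#     name: scratch-snapshot                # hypothetical snapshot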
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
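# A commented sketch contrasting the two ephemeral options described above: an emptyDir
# backed by memory, and a generic ephemeral volume provisioned from a claim template; the
# storage class name is an assumption:
#
#   volumes:
#     - name: scratch
#       emptyDir:
#         medium: Memory
#         sizeLimit: 256Mi
#     - name: cache
#       ephemeral:
#         volumeClaimTemplate:
#           spec:
#             accessModes: ["ReadWriteOnce"]
#             storageClassName: standard    # assumed storage class
#             resources:
#               requests:
#                 storage: 1Gi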
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and with non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects.
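# For illustration only, a hostPath volume as described above; most workloads should not need
# host access, and the path shown is hypothetical:
#
#   volumes:
#     - name: host-certs
#       hostPath:
#         path: /etc/ssl/certs              # hypothetical host path
#         type: Directory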
Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether to
+ support iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether to
+ support iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal.
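# A commented sketch of the image volume type described above, mounting an OCI artifact
# read-only into the pod; the reference is hypothetical:
#
#   volumes:
#     - name: extra-tooling
#       image:
#         reference: ghcr.io/example/tooling:1.0   # hypothetical OCI artifact
#         pullPolicy: IfNotPresent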
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
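# Illustrative only: the two most common persistent sources above, an NFS export and an
# existing PersistentVolumeClaim; server, path, and claim name are assumptions:
#
#   volumes:
#     - name: shared-data
#       nfs:
#         server: nfs.example.internal      # assumed server
#         path: /exports/data
#         readOnly: true
#     - name: archive
#       persistentVolumeClaim:
#         claimName: archive-pvc            # assumed claim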
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
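# A commented sketch of a projected volume combining the sources described above into one
# mount; the audience and object names are hypothetical:
#
#   volumes:
#     - name: bundle
#       projected:
#         defaultMode: 0440
#         sources:
#           - serviceAccountToken:
#               audience: vault.example.com # hypothetical audience
#               expirationSeconds: 3600
#               path: token
#           - configMap:
#               name: trust-anchors         # hypothetical ConfigMap
#           - secret:
#               name: client-tls            # hypothetical Secret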
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to start immediately after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup.
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule was checked + format: date-time + type: string + lastScheduleTime: + description: Information about the last time a backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the subscription will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" +
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc1 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - 
postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.26.0-rc2.yaml b/releases/cnpg-1.26.0-rc2.yaml new file mode 100644 index 0000000000..c1d7b515ad --- /dev/null +++ b/releases/cnpg-1.26.0-rc2.yaml @@ -0,0 +1,18009 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. 
+ `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, the operator will presume that it's running inside a GKE environment; + defaults to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance from which the backup + has been taken + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key ID + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without explicitly + providing the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3; the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace.
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
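+                      # Illustrative only (YAML comment, not part of the
+                      # generated schema): a ClusterImageCatalog with one image
+                      # per major version, satisfying the uniqueness validation
+                      # below. Image references are placeholders.
+                      #
+                      #   apiVersion: postgresql.cnpg.io/v1
+                      #   kind: ClusterImageCatalog
+                      #   metadata:
+                      #     name: postgresql-catalog
+                      #   spec:
+                      #     images:
+                      #     - major: 16
+                      #       image: registry.example.com/postgresql:16
+                      #     - major: 17
+                      #       image: registry.example.com/postgresql:17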
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
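+                          # Illustrative only (YAML comment, not part of the
+                          # generated schema): most Clusters only need the
+                          # operator-level affinity fields defined further down
+                          # in this stanza, e.g.:
+                          #
+                          #   spec:
+                          #     affinity:
+                          #       enablePodAntiAffinity: true
+                          #       podAntiAffinityType: required
+                          #       topologyKey: kubernetes.io/hostname
+                          #       nodeSelector:
+                          #         workload: postgres    # placeholder label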
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between the cluster instances has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + explicitly providing the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to back up the data files. + When not defined, base backup files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (e.g. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, the operator will presume that it's running inside a GKE environment; + defaults to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key-value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key ID + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + explicitly providing the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3; the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key-value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store.
Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (e.g. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are key-value pairs that will be added + to the .metadata.annotations of snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to the .metadata.labels of snapshot resources.
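+                  # Illustrative only (YAML comment, not part of the generated
+                  # schema): a Cluster backup stanza combining the object store
+                  # and retention settings above. Bucket and secret names are
+                  # placeholders.
+                  #
+                  #   spec:
+                  #     backup:
+                  #       retentionPolicy: "30d"
+                  #       barmanObjectStore:
+                  #         destinationPath: s3://backups/pg-example
+                  #         s3Credentials:
+                  #           accessKeyId:
+                  #             name: aws-creds
+                  #             key: ACCESS_KEY_ID
+                  #           secretAccessKey:
+                  #             name: aws-creds
+                  #             key: ACCESS_SECRET_KEY
+                  #         wal:
+                  #           compression: gzip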
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
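+ # Editor's note: a minimal, illustrative `initdb` bootstrap exercising the locale fields above; `localeProvider: icu` requires PostgreSQL 16 or later, and the database/owner names are placeholders.
+ #
+ #   spec:
+ #     bootstrap:
+ #       initdb:
+ #         database: app
+ #         owner: app
+ #         dataChecksums: true
+ #         localeProvider: icu
+ #         icuLocale: en-US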
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`). + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after it is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicitly provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps.
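+ # Editor's note: an illustrative microservice-type import, as described above; `pg-source` is a placeholder and must match an entry in `externalClusters`.
+ #
+ #   spec:
+ #     bootstrap:
+ #       initdb:
+ #         import:
+ #           type: microservice
+ #           databases:
+ #             - app
+ #           source:
+ #             externalCluster: pg-source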
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
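+ # Editor's note: a sketch of point-in-time recovery using the `recoveryTarget` fields above; the timestamp is arbitrary and `pg-origin` must name an entry in `externalClusters`.
+ #
+ #   spec:
+ #     bootstrap:
+ #       recovery:
+ #         source: pg-origin
+ #         recoveryTarget:
+ #           targetTime: "2024-05-01T10:00:00Z"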
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
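+ # Editor's note: illustrative use of the client-side certificate secrets described above; both secret names are placeholders and the secrets must exist beforehand.
+ #
+ #   spec:
+ #     certificates:
+ #       clientCASecret: my-client-ca
+ #       replicationTLSSecret: my-replication-tls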
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
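+ # Editor's note: the server-side counterpart, again with placeholder secret names; `serverAltDNSNames` extends the generated certificate when clients connect through extra hostnames.
+ #
+ #   spec:
+ #     certificates:
+ #       serverCASecret: my-server-ca
+ #       serverTLSSecret: my-server-tls
+ #       serverAltDNSNames:
+ #         - db.example.com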
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
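+ # Editor's note: a minimal, illustrative `ephemeralVolumeSource`; the storage class and size are placeholders.
+ #
+ #   spec:
+ #     ephemeralVolumeSource:
+ #       volumeClaimTemplate:
+ #         spec:
+ #           accessModes: ["ReadWriteOnce"]
+ #           storageClassName: fast-ssd
+ #           resources:
+ #             requests:
+ #               storage: 1Gi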
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName: + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim.
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
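+ # Editor's note: illustrative limits for the ephemeral volumes defined above; the values are arbitrary.
+ #
+ #   spec:
+ #     ephemeralVolumesSizeLimit:
+ #       shm: 256Mi
+ #       temporaryData: 1Gi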
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
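+ # Editor's note: an illustrative `externalClusters` entry wiring up the object store fields above as a recovery source; the names and path are placeholders.
+ #
+ #   spec:
+ #     externalClusters:
+ #       - name: pg-origin
+ #         barmanObjectStore:
+ #           destinationPath: s3://my-bucket/backups
+ #           serverName: pg-origin
+ #           s3Credentials:
+ #             inheritFromIAMRole: true
+ #           wal:
+ #             maxParallel: 8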
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
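+ # Editor's note: an illustrative managed role using the attributes above; the role name is a placeholder, while `pg_read_all_data` is a predefined PostgreSQL (14+) role.
+ #
+ #   spec:
+ #     managed:
+ #       roles:
+ #         - name: app_reader
+ #           ensure: present
+ #           login: true
+ #           inRoles:
+ #             - pg_read_all_data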
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present)
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+ superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ services:
+ description: Services managed by the `Cluster`
+ properties:
+ additional:
+ description: Additional is a list of additional managed services
+ specified by the user.
+ items:
+ description: |-
+ ManagedService represents a specific service managed by the cluster.
+ It includes the type of service and its associated template specification.
+ properties:
+ selectorType:
+ description: |-
+ SelectorType specifies the type of selectors that the service will have.
+ Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ serviceTemplate:
+ description: ServiceTemplate is the template specification
+ for the service.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only
+ supported for certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true".
It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information
+ on service's port.
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
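Putting the managed-services schema above together: a sketch that disables the default read-only service and adds a `LoadBalancer` for the read-write endpoint. Names are illustrative.

```yaml
spec:
  managed:
    services:
      disabledDefaultServices:
        - ro                        # only "r" and "ro" may be disabled
      additional:
        - selectorType: rw
          updateStrategy: patch     # default; "replace" recreates the Service
          serviceTemplate:
            metadata:
              name: cluster-example-rw-lb   # hypothetical
            spec:
              type: LoadBalancer
              ports:
                - name: postgres
                  port: 5432
                  targetPort: 5432
                  protocol: TCP
```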
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
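As a usage sketch for the monitoring stanza: enable the `PodMonitor` and drop one metric family before ingestion with a `RelabelConfig` of action `drop`. The metric pattern is illustrative.

```yaml
spec:
  monitoring:
    enablePodMonitor: true
    podMonitorMetricRelabelings:
      - action: drop
        sourceLabels:
          - __name__
        regex: cnpg_collector_.*   # hypothetical metric pattern
```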
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
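A sketch of the `plugins` stanza above. The plugin name and its parameters depend entirely on which CNPG-I plugin is installed; both are assumptions here.

```yaml
spec:
  plugins:
    - name: barman-cloud.cloudnative-pg.io   # assumed plugin name
      enabled: true                          # default
      isWALArchiver: true                    # incompatible with .spec.backup.barmanObjectStore
      parameters:                            # plugin-specific, illustrative
        barmanObjectName: minio-store
```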
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + default: required + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. 
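For the `postgresql` stanza fields listed above (`parameters`, `pg_hba`, `shared_preload_libraries`), a sketch with illustrative values, not tuning advice:

```yaml
spec:
  postgresql:
    parameters:                  # merged into postgresql.conf
      max_connections: "200"
      shared_buffers: 256MB
    pg_hba:                      # appended to pg_hba.conf
      - hostssl app app 10.0.0.0/8 scram-sha-256
    shared_preload_libraries:    # added to the default list
      - pg_stat_statements
```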
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
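The `synchronous` stanza and its CEL rule above are easier to read against an example: quorum-based replication waiting on one standby, with relaxed durability (which, per the validation, requires `standbyNamesPre` and `standbyNamesPost` to stay empty).

```yaml
spec:
  postgresql:
    synchronous:
      method: any               # quorum-based; "first" is priority-based
      number: 1                 # must be greater than zero
      dataDurability: preferred # "required" (default) blocks writes instead
```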
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
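A sketch of the probe configuration above: a `streaming` readiness probe that reports ready only while replication lag stays under a threshold. The lag value is illustrative.

```yaml
spec:
  probes:
    readiness:
      type: streaming
      maximumLag: 16Mi        # honoured only by the "streaming" strategy
      periodSeconds: 10
      failureThreshold: 3
```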
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
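Tying the projection sources above back to `projectedVolumeTemplate`: a sketch that projects a ConfigMap key and a bound service account token under the `/projected` mount. The ConfigMap name is hypothetical.

```yaml
spec:
  projectedVolumeTemplate:
    sources:
      - configMap:
          name: app-settings          # hypothetical ConfigMap
          items:
            - key: app.conf
              path: app.conf          # lands at /projected/app.conf
      - serviceAccountToken:
          path: token
          expirationSeconds: 3600     # must be at least 600
```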
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ replica:
+ description: Replica cluster configuration
+ properties:
+ enabled:
+ description: |-
+ If replica mode is enabled, this cluster will be a replica of an
+ existing cluster. A replica cluster can be created from a recovery
+ object store or via streaming through pg_basebackup.
+ Refer to the Replica clusters page of the documentation for more information.
+ type: boolean
+ minApplyDelay:
+ description: |-
+ When replica mode is enabled, this parameter allows you to replay
+ transactions only when the system time is at least the configured
+ time past the commit time. This provides an opportunity to correct
+ data loss errors. Note that when this parameter is set, a promotion
+ token cannot be used.
+ type: string
+ primary:
+ description: |-
+ Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+ topology specified in externalClusters
+ type: string
+ promotionToken:
+ description: |-
+ A demotion token generated by an external cluster used to
+ check if the promotion requirements are met.
+ type: string
+ self:
+ description: |-
+ Self defines the name of this cluster. It is used to determine if this is a primary
+ or a replica cluster, comparing it with `primary`
+ type: string
+ source:
+ description: The name of the external cluster which is the replication
+ origin
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
+ replicationSlots:
+ default:
+ highAvailability:
+ enabled: true
+ description: Replication slots management configuration
+ properties:
+ highAvailability:
+ default:
+ enabled: true
+ description: Replication slots for high availability configuration
+ properties:
+ enabled:
+ default: true
+ description: |-
+ If enabled (default), the operator will automatically manage replication slots
+ on the primary instance and use them in streaming replication
+ connections with all the standby instances that are part of the HA
+ cluster. If disabled, the operator will not take advantage
+ of replication slots in streaming connections with the replicas.
+ This feature also controls replication slots in replica clusters,
+ from the designated primary to its cascading replicas.
+ type: boolean
+ slotPrefix:
+ default: _cnpg_
+ description: |-
+ Prefix for replication slots managed by the operator for HA.
+ It may only contain lower case letters, numbers, and the underscore character.
+ This can only be set at creation time. By default set to `_cnpg_`.
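A sketch of `spec.replicationSlots` with the defaults spelled out, plus synchronization of user-defined slots; the exclusion pattern is illustrative.

```yaml
spec:
  replicationSlots:
    updateInterval: 30        # default
    highAvailability:
      enabled: true           # default
      slotPrefix: _cnpg_      # default; can only be set at creation time
    synchronizeReplicas:
      enabled: true
      excludePatterns:
        - "^temp_"            # hypothetical pattern
```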
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined, a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate.
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs; if ServerTLSSecret is provided, + this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must also provide `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash of the operator that is running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for the cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + majorVersionUpgradeFromImage: + description: |- + MajorVersionUpgradeFromImage contains the image that was + running before the major version upgrade started. + type: string + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name 
is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contains the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have the ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics.
Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance; this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted.
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
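+ # Note: throughout these CRDs, fields documented as "cannot be changed"
+ # are enforced with CEL validation rules of the form `self == oldSelf`
+ # (see the x-kubernetes-validations entries below), so the API server
+ # rejects any update that alters the field after creation.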
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
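+ # A minimal sketch of a Database object this schema accepts (all names
+ # are hypothetical; `cluster`, `name`, and `owner` are the required keys):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Database
+ #   metadata:
+ #     name: app-db
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     name: app
+ #     owner: app
+ #     extensions:
+ #       - name: pg_stat_statements
+ #         ensure: present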
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ applied:
+ description: Applied is true if the database was reconciled correctly
+ type: boolean
+ extensions:
+ description: Extensions is the status of the managed extensions
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ schemas:
+ description: Schemas is the status of the managed schemas
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.3
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
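+ # For example, a catalog with one image per major version (image tags
+ # are hypothetical); the CEL rule below rejects duplicate majors:
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql
+ #   spec:
+ #     images:
+ #       - major: 16
+ #         image: ghcr.io/cloudnative-pg/postgresql:16
+ #       - major: 17
+ #         image: ghcr.io/cloudnative-pg/postgresql:17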
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
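+ # A sketch of a single relabeling entry (the source label is the standard
+ # Prometheus meta label for the pod discovery role; the target label name
+ # is arbitrary):
+ #
+ #   podMonitorRelabelings:
+ #     - action: replace
+ #       sourceLabels: ["__meta_kubernetes_pod_name"]
+ #       targetLabel: pod_name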
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
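+ # e.g. a sketch of the intended workflow: drain connections before
+ # maintenance, then flip the flag back to resume traffic (the resource
+ # name is hypothetical):
+ #
+ #   kubectl patch pooler pooler-example --type merge \
+ #     -p '{"spec":{"pgbouncer":{"paused":true}}}'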
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly.
If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local.
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required.
A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
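+ # A sketch of one weighted term: prefer nodes in the same zone as pods
+ # labelled app=pgbouncer (labels and key are illustrative):
+ #
+ #   preferredDuringSchedulingIgnoredDuringExecution:
+ #     - weight: 100
+ #       podAffinityTerm:
+ #         labelSelector:
+ #           matchLabels:
+ #             app: pgbouncer
+ #         topologyKey: topology.kubernetes.io/zone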
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
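+ # Per the upstream Kubernetes docs, a typical matchLabelKeys value is
+ # `pod-template-hash`, which scopes the (anti-)affinity to pods from the
+ # same Deployment revision; mismatchLabelKeys inverts the match. This is
+ # an illustrative note, not CNPG-specific behavior.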
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
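# A minimal sketch of the hard pod-affinity term described above, assuming a
# hypothetical `app: cache` label on the pods to co-locate with; with multiple
# terms, the candidate node lists are intersected, so every term must hold:
#
#   podAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#     - labelSelector:
#         matchLabels:
#           app: cache
#       topologyKey: kubernetes.io/hostname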
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
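# A sketch of matchLabelKeys (beta, behind the MatchLabelKeysInPodAffinity
# feature gate, enabled by default), assuming a Deployment-managed workload:
# keying on pod-template-hash narrows the term to pods from the incoming pod's
# own ReplicaSet, so only same-revision pods count for co-location:
#
#   - labelSelector:
#       matchLabels:
#         app: web
#     matchLabelKeys: ["pod-template-hash"]
#     topologyKey: kubernetes.io/hostname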
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
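# A minimal sketch of the soft anti-affinity described above, with a
# hypothetical `app: my-app` label: the scheduler spreads matching pods across
# hostnames when possible, but still schedules when it cannot:
#
#   podAntiAffinity:
#     preferredDuringSchedulingIgnoredDuringExecution:
#     - weight: 100
#       podAffinityTerm:
#         labelSelector:
#           matchLabels:
#             app: my-app
#         topologyKey: kubernetes.io/hostname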
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
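# The hard variant of the same idea, again with a hypothetical `app: my-app`
# label: no two matching pods may share a zone, and new pods stay Pending when
# every zone is already taken:
#
#   podAntiAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#     - labelSelector:
#         matchLabels:
#           app: my-app
#       topologyKey: topology.kubernetes.io/zone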
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1".
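# A minimal sketch of the env sources described above, with hypothetical
# ConfigMap and Secret names; fieldRef values are resolved from the pod itself
# at runtime via the downward API:
#
#   env:
#   - name: POD_NAME
#     valueFrom:
#       fieldRef:
#         fieldPath: metadata.name
#   - name: LOG_LEVEL
#     valueFrom:
#       configMapKeyRef:
#         name: app-config
#         key: log-level
#   - name: DB_PASSWORD
#     valueFrom:
#       secretKeyRef:
#         name: app-credentials
#         key: password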
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
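# A minimal sketch of a postStart hook under this schema (hypothetical
# command); if the handler fails, the container is terminated and restarted
# according to its restart policy:
#
#   lifecycle:
#     postStart:
#       exec:
#         command: ["/bin/sh", "-c", "echo started > /tmp/ready-marker"]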
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
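# A minimal sketch of a preStop sleep, a common way to let load balancers
# drain connections before the container receives SIGTERM; note the pause
# still counts against terminationGracePeriodSeconds:
#
#   lifecycle:
#     preStop:
#       sleep:
#         seconds: 10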
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
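# A minimal sketch of an HTTP liveness probe using the fields above, assuming
# a hypothetical /healthz endpoint on port 8080:
#
#   livenessProbe:
#     httpGet:
#       path: /healthz
#       port: 8080
#     initialDelaySeconds: 5
#     periodSeconds: 10
#     failureThreshold: 3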
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
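# A minimal sketch of a port declaration (hypothetical metrics port); the name
# must be an IANA_SVC_NAME, unique within the pod, so Services can refer to it:
#
#   ports:
#   - name: metrics
#     containerPort: 9187
#     protocol: TCP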
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
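# A minimal sketch of the resources block introduced above (hypothetical
# sizes); setting the memory request equal to its limit is a common way to
# avoid memory overcommit on the node:
#
#   resources:
#     requests:
#       cpu: 500m
#       memory: 1Gi
#     limits:
#       memory: 1Gi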
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
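# A minimal sketch of a restrictive container securityContext consistent with
# the fields above; a sketch, not a mandated default:
#
#   securityContext:
#     runAsNonRoot: true
#     allowPrivilegeEscalation: false
#     readOnlyRootFilesystem: true
#     capabilities:
#       drop: ["ALL"]
#     seccompProfile:
#       type: RuntimeDefault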
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
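As a sketch, a restrictive container securityContext assembled only from fields in this schema (the values are illustrative, not defaults of this operator):

  securityContext:
    runAsNonRoot: true
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
    seccompProfile:
      type: RuntimeDefault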
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
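A minimal startupProbe sketch using the fields above; with failureThreshold: 30 and periodSeconds: 10 the container gets up to 300 seconds to start accepting connections before the kubelet restarts it (the port number is hypothetical):

  startupProbe:
    tcpSocket:
      port: 5432
    failureThreshold: 30
    periodSeconds: 10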
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
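A volumeMounts sketch based on the fields described above (volume and path names are hypothetical; each name must match a volume declared in the pod spec):

  volumeMounts:
    - name: config
      mountPath: /etc/app
      readOnly: true
    - name: data
      mountPath: /var/lib/app
      subPath: instance-1                  # mount only this sub-directory of the volume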
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
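A dnsConfig/dnsPolicy sketch built from the fields above (the address and domain are placeholders); with dnsPolicy: "None", the pod's resolv.conf is generated solely from dnsConfig:

  dnsPolicy: "None"
  dnsConfig:
    nameservers:
      - 192.0.2.10
    searches:
      - example.internal
    options:
      - name: ndots
        value: "2"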
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
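An env/envFrom sketch combining the value sources described above (the ConfigMap and Secret names are hypothetical):

  env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:
          name: app-credentials
          key: password
  envFrom:
    - prefix: APP_
      configMapRef:
        name: app-settings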
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
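Lifecycle handlers are not allowed for ephemeral containers (as noted above), but on a regular container they would look like this sketch (the command and sleep duration are illustrative; the sleep handler additionally requires the PodLifecycleSleepAction feature):

  lifecycle:
    postStart:
      exec:
        command: ["/bin/sh", "-c", "echo started > /tmp/started"]
    preStop:
      sleep:
        seconds: 5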
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
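Probes are likewise disallowed on ephemeral containers; on a regular container, a gRPC livenessProbe per the schema above might look like this (the port is hypothetical; an empty service name asks the server for its overall health):

  livenessProbe:
    grpc:
      port: 9090
      service: ""
    periodSeconds: 10
    failureThreshold: 3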
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
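Ephemeral containers cannot set resources (see above); on a regular container, the quantity pattern shown in this schema accepts plain integers as well as suffixed strings, for example:

  resources:
    requests:
      cpu: 500m          # half a CPU
      memory: 256Mi
    limits:
      cpu: "1"
      memory: 1Gi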
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ hostAliases:
+ description: |-
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ file if specified.
+ items:
+ description: |-
+ HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+ pod's hosts file.
+ properties:
+ hostnames:
+ description: Hostnames for the above IP address.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ip:
+ description: IP address of the host file entry.
+ type: string
+ required:
+ - ip
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - ip
+ x-kubernetes-list-type: map
+ hostIPC:
+ description: |-
+ Use the host's ipc namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostNetwork:
+ description: |-
+ Host networking requested for this pod. Use the host's network namespace.
+ If this option is set, the ports that will be used must be specified.
+ Defaults to false.
+ type: boolean
+ hostPID:
+ description: |-
+ Use the host's pid namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostUsers:
+ description: |-
+ Use the host's user namespace.
+ Optional: Defaults to true.
+ If set to true or not present, the pod will be run in the host user namespace, useful
+ for when the pod needs a feature only available to the host user namespace, such as
+ loading a kernel module with CAP_SYS_MODULE.
+ When set to false, a new userns is created for the pod. 
Setting false is useful for
+ mitigating container breakout vulnerabilities while still allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max
+ of that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX
+ capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX
+ capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated. 
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + - securityContext.windowsOptions + + If the OS field is set to windows, the following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional values may be defined in the future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Defaults to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by the specified scheduler. + If not specified, the pod will be dispatched by the default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate.
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied.
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Defaults to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Defaults to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be a non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys is equal to or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5 (MinDomains), so "global minimum" is treated as 0. + In this situation, a new pod with the same labelSelector cannot be scheduled, + because the computed skew will be 3 (3 - 0) if the new pod is scheduled to any of the three zones, + which would violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field.
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specifies whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespace. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
+ type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. 
Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serviceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to back up + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to start immediately after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`). + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup.
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule was checked + format: date-time + type: string + lastScheduleTime: + description: Information about the last time a backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + 
rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.26.0-rc3.yaml b/releases/cnpg-1.26.0-rc3.yaml new file mode 100644 index 0000000000..0e3ebc55f8 --- /dev/null +++ b/releases/cnpg-1.26.0-rc3.yaml @@ -0,0 +1,18021 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to back up + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`). + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots. + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. 
+ `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without explicitly + providing the keys. + type: boolean + storageAccount: + description: The storage account where data is uploaded + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where the backup is stored (e.g. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required by the S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, it is presumed to be running inside a GKE environment; + defaults to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance from which the backup + has been taken + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use role-based authentication without explicitly + providing the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3; the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
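# Illustrative sketch of the affinity and toleration fields documented above,
# as they might appear in a Cluster manifest. The cluster name, node label,
# topology key and taint values are assumptions, not defaults.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  affinity:
    enablePodAntiAffinity: true
    podAntiAffinityType: required   # instances stay pending if no node satisfies the rule
    topologyKey: topology.kubernetes.io/zone
    nodeSelector:
      workload: postgres
    tolerations:
    - key: dedicated
      operator: Equal
      value: postgres
      effect: NoSchedule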
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. 
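# Sketch of an object store backup through the barmanObjectStore fields above.
# Bucket path, endpoint and secret names/keys are assumptions for illustration.
spec:
  backup:
    retentionPolicy: "30d"
    target: prefer-standby
    barmanObjectStore:
      destinationPath: s3://my-bucket/backups
      endpointURL: https://s3.example.com
      s3Credentials:
        accessKeyId:
          name: backup-creds
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: backup-creds
          key: ACCESS_SECRET_KEY
      wal:
        compression: gzip
        maxParallel: 4
      data:
        compression: gzip
        jobs: 2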
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
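# Sketch of a volume snapshot backup configuration, per the volumeSnapshot
# fields above; the snapshot class name is an assumption that depends on the
# CSI driver in use.
spec:
  backup:
    volumeSnapshot:
      className: csi-snapclass
      online: true
      onlineConfiguration:
        immediateCheckpoint: false
        waitForArchive: true
      snapshotOwnerReference: cluster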
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. 
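# Sketch of an initdb bootstrap that imports a single database from another
# PostgreSQL instance (microservice type, per the import fields above).
# Database, owner and external cluster names are illustrative.
spec:
  bootstrap:
    initdb:
      database: app
      owner: app
      dataChecksums: true
      import:
        type: microservice
        databases:
        - app
        source:
          externalCluster: source-cluster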
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
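# Sketch of a point-in-time recovery bootstrap driven by the recoveryTarget
# fields above. The source name and target time are assumptions; the matching
# entry under `externalClusters` (defined elsewhere in the spec, outside this
# excerpt) must describe the object store holding the base backup and WAL archive.
spec:
  bootstrap:
    recovery:
      source: origin-cluster
      database: app
      owner: app
      recoveryTarget:
        targetTime: "2024-05-01T10:00:00Z"
        exclusive: false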
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
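# Sketch of user-provided TLS material, per the certificates block above.
# All secret names and the DNS name are assumptions; the secrets must exist
# in the cluster's namespace.
spec:
  certificates:
    serverCASecret: my-server-ca
    serverTLSSecret: my-server-tls
    clientCASecret: my-client-ca
    replicationTLSSecret: my-replication-tls
    serverAltDNSNames:
    - pg.example.com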
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the
+ PersistentVolume backing this claim.
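To ground the `volumeClaimTemplate` schema above, a hedged sketch of an ephemeral volume claim template on a `Cluster`; the label, storage class, and size are assumed example values:

```yaml
spec:
  ephemeralVolumeSource:
    volumeClaimTemplate:
      metadata:
        labels:
          app.kubernetes.io/part-of: cluster-example  # copied into the generated PVC
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: standard                    # assumed storage class name
        resources:
          requests:
            storage: 1Gi
```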
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ ephemeralVolumesSizeLimit:
+ description: |-
+ EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+ volumes
+ properties:
+ shm:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Shm is the size limit of the shared memory volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ temporaryData:
+ anyOf:
+ - type: integer
+ - type: string
+ description: TemporaryData is the size limit of the temporary
+ data volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ externalClusters:
+ description: The list of external clusters which are used in the configuration
+ items:
+ description: |-
+ ExternalCluster represents the connection parameters to an
+ external cluster which is used in the other sections of the configuration
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to back up the data files.
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
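As a quick illustration of the `ephemeralVolumesSizeLimit` stanza defined above, a minimal sketch; the sizes are arbitrary example values:

```yaml
spec:
  ephemeralVolumesSizeLimit:
    shm: 256Mi          # cap for the shared memory volume backing /dev/shm
    temporaryData: 1Gi  # cap for the temporary data volume
```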
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2`, and `snappy`.
+ enum:
+ - bzip2
+ - gzip
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role-based authentication without
+ explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2`,
+ `lz4`, `snappy`, `xz`, and `zstd`.
+ enum:
+ - bzip2
+ - gzip
+ - lz4
+ - snappy
+ - xz
+ - zstd
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
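Tying the `barmanObjectStore` fields above together, a minimal sketch of an external cluster pointing at an S3-compatible store; the bucket path, endpoint, and secret names are assumptions:

```yaml
spec:
  externalClusters:
    - name: origin                           # referenced by other sections of the configuration
      barmanObjectStore:
        destinationPath: s3://backups/origin # assumed bucket/path, shared by WALs and data
        endpointURL: https://s3.example.com  # assumed custom endpoint, overriding discovery
        s3Credentials:
          accessKeyId:
            name: aws-creds                  # assumed secret
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: aws-creds
            key: ACCESS_SECRET_KEY
        data:
          compression: gzip
          jobs: 2
        wal:
          compression: gzip
          maxParallel: 4
```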
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslRootCert:
+ description: |-
+ The reference to an SSL CA public key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - name
+ type: object
+ type: array
+ failoverDelay:
+ default: 0
+ description: |-
+ The amount of time (in seconds) to wait before triggering a failover
+ after the primary PostgreSQL instance in the cluster was detected
+ to be unhealthy
+ format: int32
+ type: integer
+ imageCatalogRef:
+ description: Defines the major PostgreSQL version we want to use within
+ an ImageCatalog
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ major:
+ description: The major version of PostgreSQL we want to use from
+ the ImageCatalog
+ type: integer
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - major
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: Only image catalogs are supported
+ rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+ - message: Only image catalogs are supported
+ rule: self.apiGroup == 'postgresql.cnpg.io'
+ imageName:
+ description: |-
+ Name of the container image, supporting both tags (`<image>:<tag>`)
+ and digests for deterministic and repeatable deployments
+ (`<image>:<tag>@sha256:<digestValue>`)
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of `Always`, `Never` or `IfNotPresent`.
+ If not defined, it defaults to `IfNotPresent`.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ imagePullSecrets:
+ description: The list of pull secrets to be used to pull the images
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate a
+ local object with a known type inside the same namespace
+ properties:
+ name:
+ description: Name of the referent.
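The image-related fields above admit two styles of version pinning; a hedged sketch (the image tag, catalog name, and pull secret are assumptions, and in practice you would set either `imageName` or `imageCatalogRef`, not both):

```yaml
spec:
  imageName: ghcr.io/cloudnative-pg/postgresql:17.0  # assumed tag; a @sha256: digest pins it exactly
  # Alternative, catalog-driven selection:
  # imageCatalogRef:
  #   apiGroup: postgresql.cnpg.io
  #   kind: ImageCatalog          # or ClusterImageCatalog, per the validation rules above
  #   name: postgresql-catalog    # assumed catalog name
  #   major: 17
  imagePullPolicy: IfNotPresent
  imagePullSecrets:
    - name: registry-credentials  # assumed pull secret
```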
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ inheritedMetadata:
+ description: Metadata that will be inherited by all objects related
+ to the Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ instances:
+ default: 1
+ description: Number of instances required in the cluster
+ minimum: 1
+ type: integer
+ livenessProbeTimeout:
+ description: |-
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ to successfully respond to the liveness probe (default 30).
+ The Liveness probe failure threshold is derived from this value using the formula:
+ ceiling(livenessProbe / 10).
+ format: int32
+ type: integer
+ logLevel:
+ default: info
+ description: 'The instances'' log level, one of the following values:
+ error, warning, info (default), debug, trace'
+ enum:
+ - error
+ - warning
+ - info
+ - debug
+ - trace
+ type: string
+ managed:
+ description: The configuration that is used by the portions of PostgreSQL
+ that are managed by the instance manager
+ properties:
+ roles:
+ description: Database roles managed by the `Cluster`
+ items:
+ description: |-
+ RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+ with the additional field Ensure specifying whether to ensure the presence or
+ absence of the role in the database
+
+ The defaults of the CREATE ROLE command are applied
+ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+ properties:
+ bypassrls:
+ description: |-
+ Whether a role bypasses every row-level security (RLS) policy.
+ Default is `false`.
+ type: boolean
+ comment:
+ description: Description of the role
+ type: string
+ connectionLimit:
+ default: -1
+ description: |-
+ If the role can log in, this specifies how many concurrent
+ connections the role can make. `-1` (the default) means no limit.
+ format: int64
+ type: integer
+ createdb:
+ description: |-
+ When set to `true`, the role being defined will be allowed to create
+ new databases. Specifying `false` (default) will deny a role the
+ ability to create databases.
+ type: boolean
+ createrole:
+ description: |-
+ Whether the role will be permitted to create, alter, drop, comment
+ on, change the security label for, and grant or revoke membership in
+ other roles. Default is `false`.
+ type: boolean
+ disablePassword:
+ description: DisablePassword indicates that a role's password
+ should be set to NULL in Postgres
+ type: boolean
+ ensure:
+ default: present
+ description: Ensure the role is `present` or `absent` -
+ defaults to "present"
+ enum:
+ - present
+ - absent
+ type: string
+ inRoles:
+ description: |-
+ List of one or more existing roles to which this role will be
+ immediately added as a new member. Default empty.
+ items:
+ type: string
+ type: array
+ inherit:
+ default: true
+ description: |-
+ Whether a role "inherits" the privileges of roles it is a member of.
+ Default is `true`.
+ type: boolean
+ login:
+ description: |-
+ Whether the role is allowed to log in. A role having the `login`
+ attribute can be thought of as a user. Roles without this attribute
+ are useful for managing database privileges, but are not users in
+ the usual sense of the word. Default is `false`.
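Putting the `RoleConfiguration` fields above into context (the `name` and `passwordSecret` fields follow just below), a hedged sketch of a declaratively managed role; the role and secret names are assumptions:

```yaml
spec:
  managed:
    roles:
      - name: app_reader              # assumed role name
        ensure: present
        comment: read-only application role
        login: true
        inherit: true
        connectionLimit: 10
        inRoles:
          - pg_monitor
        passwordSecret:
          name: app-reader-password   # assumed secret holding the role password
```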
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present)
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+ superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ services:
+ description: Services roles managed by the `Cluster`
+ properties:
+ additional:
+ description: Additional is a list of additional managed services
+ specified by the user.
+ items:
+ description: |-
+ ManagedService represents a specific service managed by the cluster.
+ It includes the type of service and its associated template specification.
+ properties:
+ selectorType:
+ description: |-
+ SelectorType specifies the type of selectors that the service will have.
+ Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ serviceTemplate:
+ description: ServiceTemplate is the template specification
+ for the service.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only
+ supported for certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information
+ on service's port.
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
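As an illustration of the managed `services` stanza whose `ServiceSpec` fields are detailed above (the `updateStrategy` and `disabledDefaultServices` fields are defined just below), a hedged sketch exposing the read-write endpoint through a LoadBalancer; the names and annotation are assumptions:

```yaml
spec:
  managed:
    services:
      disabledDefaultServices: ["ro"]       # drop the default read-only service
      additional:
        - selectorType: rw                  # route traffic to the primary
          updateStrategy: patch
          serviceTemplate:
            metadata:
              name: cluster-example-rw-lb   # assumed service name
              annotations:
                networking.example.com/lb: internal   # hypothetical cloud-specific annotation
            spec:
              type: LoadBalancer
              externalTrafficPolicy: Local  # preserve client source IPs
```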
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing the tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that needs to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty.
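# Illustrative sketch: enabling the `PodMonitor` together with a custom-queries
# ConfigMap and one metric relabeling, per the monitoring fields above. The
# ConfigMap name and key, and the dropped metric pattern, are hypothetical.
#
#   spec:
#     monitoring:
#       enablePodMonitor: true
#       customQueriesConfigMap:
#         - name: example-queries      # hypothetical ConfigMap
#           key: queries.yaml          # hypothetical key
#       podMonitorMetricRelabelings:
#         - action: drop               # drop matching samples before ingestion
#           sourceLabels: ["__name__"]
#           regex: "pg_stat_bgwriter_.*"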
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP scheme to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 'false' + is the default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: List of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node label values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + default: required + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL.
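# Illustrative sketch: a `postgresql` section combining `parameters`, `pg_hba`
# rules, and bind+search LDAP authentication, as described above. The server
# address, DNs, and Secret reference are assumptions.
#
#   spec:
#     postgresql:
#       parameters:
#         max_connections: "200"
#         shared_buffers: "256MB"
#       pg_hba:
#         - host all all 10.0.0.0/8 scram-sha-256
#       ldap:
#         server: ldap.example.com     # hypothetical host
#         scheme: ldaps
#         bindSearchAuth:
#           baseDN: ou=people,dc=example,dc=com
#           bindDN: cn=admin,dc=example,dc=com
#           bindPassword:
#             name: ldap-secret        # hypothetical Secret
#             key: password
#           searchAttribute: uid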
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
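# Illustrative sketch: quorum-based synchronous replication with relaxed
# durability, plus a supervised switchover-based primary update, using the
# fields documented above (values are examples).
#
#   spec:
#     postgresql:
#       synchronous:
#         method: any                  # quorum-based synchronous replication
#         number: 1                    # transactions wait for one standby
#         dataDurability: preferred    # quorum adapts if replicas become unavailable
#     primaryUpdateMethod: switchover
#     primaryUpdateStrategy: supervised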
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
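# Illustrative sketch: tuning the injected probes described above; the
# readiness probe uses the `streaming` strategy with a lag threshold. All
# numeric values are arbitrary examples.
#
#   spec:
#     probes:
#       startup:
#         failureThreshold: 30
#         periodSeconds: 10
#       readiness:
#         type: streaming
#         maximumLag: 32Mi             # only meaningful for the `streaming` strategy
#       liveness:
#         periodSeconds: 10
#         timeoutSeconds: 5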
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
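# Illustrative sketch: a projected volume (mounted under the `/projected`
# base folder) combining a ConfigMap key and a Secret key, per the template
# above. Resource names and paths are hypothetical.
#
#   spec:
#     projectedVolumeTemplate:
#       sources:
#         - configMap:
#             name: app-settings       # hypothetical ConfigMap
#             items:
#               - key: app.conf
#                 path: app.conf
#         - secret:
#             name: app-credentials    # hypothetical Secret
#             items:
#               - key: ca.crt
#                 path: certs/ca.crt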
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. A replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica clusters, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`.
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
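# Illustrative sketch: HA replication-slot management with user-defined slot
# synchronization filtered by pattern, plus pod resource requirements, per the
# fields above. The exclusion pattern and resource figures are assumptions.
#
#   spec:
#     replicationSlots:
#       highAvailability:
#         enabled: true
#         slotPrefix: _cnpg_
#       synchronizeReplicas:
#         enabled: true
#         excludePatterns:
#           - "^temp_"                 # hypothetical pattern
#       updateInterval: 30
#     resources:
#       requests:
#         cpu: "1"
#         memory: 2Gi
#       limits:
#         memory: 2Gi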
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined, a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group.
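# Illustrative sketch: instance storage with an explicit size and storage
# class, and a user-provided superuser secret, per the fields above. The
# StorageClass and Secret names are hypothetical.
#
#   spec:
#     storage:
#       size: 20Gi
#       storageClass: fast-ssd         # hypothetical StorageClass
#       resizeInUseVolumes: true
#     superuserSecret:
#       name: cluster-superuser        # hypothetical Secret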
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
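For orientation, here is a minimal sketch of how the `tablespaces` and `topologySpreadConstraints` fields defined above fit into a Cluster manifest. All names, sizes, and the zone key are illustrative placeholders, and the `cnpg.io/cluster` selector assumes the label the operator applies to instance pods:

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 10Gi
  tablespaces:
    - name: scratch
      temporary: true          # registered in temp_tablespaces
      storage:
        size: 5Gi
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          cnpg.io/cluster: cluster-example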
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
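In practice the `walStorage` stanza is usually far simpler than the full PVC template spelled out here; a hedged sketch of the common case, using the `size` and `storageClass` keys that appear further down in this schema (the class name is a placeholder):

spec:
  walStorage:
    size: 2Gi
    storageClass: fast-ssd   # placeholder; the default storage class is used if omitted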
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate.
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; it can be omitted + if ReplicationTLSSecret is provided.
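A secret matching this description would carry the two keys below; the certificate data is elided, and the same layout applies to the server CA secret described further on:

apiVersion: v1
kind: Secret
metadata:
  name: cluster-example-ca
data:
  ca.crt: <base64-encoded CA certificate>
  ca.key: <base64-encoded CA key>   # optional when the TLS secret is provided separately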
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server SSL certificates; it can be omitted + if ServerTLSSecret is provided.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash of the operator that is currently running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
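These fields surface under `.status` of the Cluster resource; an abridged, invented fragment for a healthy three-instance cluster might read:

status:
  currentPrimary: cluster-example-1
  instances: 3
  instanceNames:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  image: ghcr.io/cloudnative-pg/postgresql:17   # illustrative value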
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. 
+ properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contains the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions.
+ type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. + type: boolean + type: object + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance; this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extracted.
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
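A minimal Database manifest exercising these fields might look as follows; the cluster, database, and role names are placeholders, and the extension is assumed to ship with the PostgreSQL image in use:

apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  name: app-db
spec:
  cluster:
    name: cluster-example
  name: app        # required, together with cluster and owner
  owner: app
  extensions:
    - name: pg_stat_statements
  databaseReclaimPolicy: retain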
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + extensions: + description: Extensions is the status of the managed extensions + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True if the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + schemas: + description: Schemas is the status of the managed schemas + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True if the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
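An illustrative ImageCatalog satisfying the constraints on `major` (unique per catalog, between one and eight entries); the image references are placeholders:

apiVersion: postgresql.cnpg.io/v1
kind: ImageCatalog
metadata:
  name: postgresql
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17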
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
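+# A sketch of the PgBouncer settings above, and of toggling `paused`
+# (resource names are hypothetical):
+#
+#   spec:
+#     pgbouncer:
+#       poolMode: transaction
+#       parameters:
+#         max_client_conn: "1000"
+#
+# Pausing and resuming through the operator's PAUSE/RESUME integration:
+#
+#   kubectl patch pooler pooler-example-rw \
+#     --type merge -p '{"spec": {"pgbouncer": {"paused": true}}}'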
+ type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as described above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly.
If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as described above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local.
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature. + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required.
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred.
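+# A sketch of a weighted (soft) pod affinity term as described above, under
+# `.spec.template.spec.affinity.podAffinity` of a Pooler; the label key and
+# value are hypothetical:
+#
+#   preferredDuringSchedulingIgnoredDuringExecution:
+#     - weight: 50                     # must be in the range 1-100
+#       podAffinityTerm:
+#         labelSelector:
+#           matchLabels:
+#             cnpg.io/cluster: cluster-example
+#         topologyKey: kubernetes.io/hostname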
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
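+# A sketch of `matchLabelKeys` as described above: the incoming pod's value
+# for each listed key is merged into the label selector as `key in (value)`,
+# e.g. to match only pods from the same rollout:
+#
+#   matchLabelKeys:
+#     - pod-template-hash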
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
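+# A sketch of a required (hard) pod affinity term as described above; all
+# listed terms must be satisfied at scheduling time (label values are
+# hypothetical):
+#
+#   requiredDuringSchedulingIgnoredDuringExecution:
+#     - labelSelector:
+#         matchExpressions:
+#           - key: app
+#             operator: In
+#             values: ["pgbouncer"]
+#       topologyKey: topology.kubernetes.io/zone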
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1".
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated.
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version.
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
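+                        # Example (illustrative only; path and port are placeholders): an
+                        # httpGet liveness probe. With the defaults periodSeconds: 10 and
+                        # failureThreshold: 3, the kubelet restarts the container roughly
+                        # 3 x 10 = 30s after the endpoint stops answering:
+                        #   livenessProbe:
+                        #     httpGet:
+                        #       path: /healthz
+                        #       port: 8080
+                        #     periodSeconds: 10
+                        #     failureThreshold: 3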
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
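+                        # Example (illustrative only; the port is a placeholder): a tcpSocket
+                        # readiness probe. While it fails, the pod is only removed from Service
+                        # endpoints, not restarted:
+                        #   readinessProbe:
+                        #     tcpSocket:
+                        #       port: 5432
+                        #     periodSeconds: 10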
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
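+                        # Example (illustrative only; quantities are placeholders): requests and
+                        # limits use the quantity syntax matched by the pattern above. Per the
+                        # restartPolicy description, "Always" is only valid on init containers
+                        # and turns them into sidecars:
+                        #   resources:
+                        #     requests: {cpu: 100m, memory: 128Mi}
+                        #     limits: {memory: 256Mi}
+                        #   restartPolicy: Always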
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
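+                        # Example (illustrative only; a conventional hardening baseline, not a
+                        # value mandated by this schema): a restrictive container securityContext
+                        # combining the fields described above:
+                        #   securityContext:
+                        #     runAsNonRoot: true
+                        #     allowPrivilegeEscalation: false
+                        #     readOnlyRootFilesystem: true
+                        #     capabilities:
+                        #       drop: ["ALL"]
+                        #     seccompProfile:
+                        #       type: RuntimeDefault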
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
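+                        # Example (illustrative only; path, port and thresholds are
+                        # placeholders): a startup probe giving a slow-starting container up to
+                        # failureThreshold x periodSeconds = 30 x 10 = 300s before liveness
+                        # checks take over:
+                        #   startupProbe:
+                        #     httpGet:
+                        #       path: /healthz
+                        #       port: 8080
+                        #     failureThreshold: 30
+                        #     periodSeconds: 10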
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated.
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
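+                        # Example (illustrative only; names and paths are placeholders): a
+                        # read-only mount of a single subtree of a volume. mountPath must not
+                        # contain ':', and the name must match a declared volume:
+                        #   volumeMounts:
+                        #     - name: config
+                        #       mountPath: /etc/app
+                        #       readOnly: true
+                        #       subPath: app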
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + - securityContext.windowsOptions + + If the OS field is set to windows, the following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional values may be defined in the future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority.
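+                  # Example (illustrative only; the label value is a well-known node label,
+                  # the combination is a placeholder): pinning the pod to Linux nodes. Setting
+                  # os.name also restricts the Windows-only fields listed above:
+                  #   os:
+                  #     name: linux
+                  #   nodeSelector:
+                  #     kubernetes.io/os: linux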
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
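+                  # Example (illustrative only; the gate name is a placeholder): a scheduling
+                  # gate that keeps the pod in SchedulingGated until a controller removes it:
+                  #   schedulingGates:
+                  #     - name: example.com/infra-ready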
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
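+                  # Example (illustrative only; the IDs are placeholders): pod-level defaults
+                  # that individual containers may override. fsGroup makes supported volumes
+                  # group-owned by GID 1001, per the ownership rules described above:
+                  #   securityContext:
+                  #     runAsNonRoot: true
+                  #     fsGroup: 1001
+                  #     fsGroupChangePolicy: OnRootMismatch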
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied.
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
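+                  # Example (illustrative only; the value is a placeholder): a namespaced
+                  # sysctl. As noted above, pods requesting sysctls the runtime does not
+                  # support may fail to launch:
+                  #   securityContext:
+                  #     sysctls:
+                  #       - name: net.ipv4.tcp_keepalive_time
+                  #         value: "600"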
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string.
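+                  # Example (illustrative only; the key and duration are placeholders):
+                  # tolerate a NoExecute taint for a bounded time before eviction:
+                  #   tolerations:
+                  #     - key: "node.example.com/maintenance"
+                  #       operator: "Exists"
+                  #       effect: "NoExecute"
+                  #       tolerationSeconds: 300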
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
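+ # Illustrative sketch (not part of the generated schema): one constraint
+ # spreading matching pods across zones; the "app: my-app" label is a
+ # hypothetical placeholder.
+ #
+ #   topologySpreadConstraints:
+ #     - maxSkew: 1
+ #       topologyKey: topology.kubernetes.io/zone
+ #       whenUnsatisfiable: DoNotSchedule
+ #       labelSelector:
+ #         matchLabels:
+ #           app: my-app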
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
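+ # Illustrative sketch (not part of the generated schema) of the
+ # replacement recommended in the gitRepo deprecation note above: clone
+ # with an init container into an emptyDir; the image and repository URL
+ # are hypothetical placeholders.
+ #
+ #   initContainers:
+ #     - name: clone-repo
+ #       image: alpine/git
+ #       args: ["clone", "--", "https://example.com/repo.git", "/repo"]
+ #       volumeMounts:
+ #         - name: repo
+ #           mountPath: /repo
+ #   volumes:
+ #     - name: repo
+ #       emptyDir: {}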
+ type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. 
Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal.
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
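+ # Illustrative sketch (not part of the generated schema): a projected
+ # volume combining a configMap source with a downwardAPI source; the
+ # volume and configMap names are hypothetical placeholders.
+ #
+ #   volumes:
+ #     - name: combined
+ #       projected:
+ #         sources:
+ #           - configMap:
+ #               name: app-config
+ #           - downwardAPI:
+ #               items:
+ #                 - path: pod-name
+ #                   fieldRef:
+ #                     fieldPath: metadata.name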
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
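# A minimal Pooler sketch matching the spec above (illustrative only: the
# cluster name is a placeholder, and `instances` / `pgbouncer.poolMode`
# are assumed from the full Pooler spec shown earlier in this manifest).
# It fronts the `rw` service of an existing Cluster, per the `type` enum.
# apiVersion: postgresql.cnpg.io/v1
# kind: Pooler
# metadata:
#   name: pooler-example-rw
# spec:
#   cluster:
#     name: cluster-example
#   instances: 3
#   type: rw
#   pgbouncer:
#     poolMode: session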
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
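# A Publication sketch under the target schema above (all names are
# placeholders): `target.objects` publishes a single table, the
# declarative equivalent of `CREATE PUBLICATION pub_app FOR TABLE
# public.orders` on the "publisher" cluster.
# apiVersion: postgresql.cnpg.io/v1
# kind: Publication
# metadata:
#   name: publication-example
# spec:
#   cluster:
#     name: cluster-example
#   dbname: app
#   name: pub_app
#   target:
#     objects:
#       - table:
#           schema: public
#           name: orders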
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to be immediately start after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup. 
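# A ScheduledBackup sketch (the cluster name is a placeholder). Note the
# six-field cron format described by the `schedule` field above: the
# leading field is seconds, so this expression runs daily at midnight.
# apiVersion: postgresql.cnpg.io/v1
# kind: ScheduledBackup
# metadata:
#   name: backup-example
# spec:
#   schedule: "0 0 0 * * *"
#   backupOwnerReference: self
#   cluster:
#     name: cluster-example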
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule + format: date-time + type: string + lastScheduleTime: + description: Information when was the last time that backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
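# A Subscription sketch pairing with the Publication example above (all
# names are placeholders): `externalClusterName` is expected to match an
# entry in the subscriber Cluster's `externalClusters` list, and
# `publicationName` the publication created on the publisher.
# apiVersion: postgresql.cnpg.io/v1
# kind: Subscription
# metadata:
#   name: subscription-example
# spec:
#   cluster:
#     name: cluster-dest
#   dbname: app
#   name: sub_app
#   externalClusterName: cluster-source
#   publicationName: pub_app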
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
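# The entries above also document the format for user-defined metrics:
# each pairs a SQL `query` with a `metrics` list mapping output columns
# to LABEL/GAUGE/COUNTER usages, and an optional `runonserver` gate
# restricts it to matching PostgreSQL versions (as with the two
# pg_stat_bgwriter variants). A hypothetical custom entry:
# pg_sessions:
#   query: "SELECT count(*) AS total FROM pg_catalog.pg_stat_activity"
#   metrics:
#     - total:
#         usage: "GAUGE"
#         description: "Number of server sessions"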
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + 
rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.26.0.yaml b/releases/cnpg-1.26.0.yaml new file mode 100644 index 0000000000..5b77b32d14 --- /dev/null +++ b/releases/cnpg-1.26.0.yaml @@ -0,0 +1,18020 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. 
+ `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100.
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a map of key-value pairs used to define the nodes on which + the pods can run.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files. + When not defined, base backup files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (e.g. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
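+                # Illustrative sketch (comment only, not part of the generated
+                # schema): compressing and encrypting base backups with the
+                # `data` options above. The bucket path is hypothetical.
+                #
+                #   spec:
+                #     backup:
+                #       barmanObjectStore:
+                #         destinationPath: s3://my-bucket/backups
+                #         data:
+                #           compression: gzip
+                #           encryption: AES256
+                #           jobs: 4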
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + defaults to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key-value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key-value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store.
Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (e.g. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources.
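+                # Illustrative sketch (comment only, not part of the generated
+                # schema): WAL compression, parallel archiving, a 30-day
+                # retention policy, and the backup target policy described above.
+                #
+                #   spec:
+                #     backup:
+                #       retentionPolicy: "30d"
+                #       target: prefer-standby
+                #       barmanObjectStore:
+                #         wal:
+                #           compression: zstd
+                #           maxParallel: 8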
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
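+                # Illustrative sketch (comment only, not part of the generated
+                # schema): an online volume snapshot backup configuration using
+                # the fields above. The snapshot class name "csi-snapclass" is
+                # hypothetical.
+                #
+                #   spec:
+                #     backup:
+                #       volumeSnapshot:
+                #         className: csi-snapclass
+                #         online: true
+                #         onlineConfiguration:
+                #           immediateCheckpoint: true
+                #           waitForArchive: true
+                #         snapshotOwnerReference: cluster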
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after it is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicitly provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
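+                # Illustrative sketch (comment only, not part of the generated
+                # schema): an `initdb` bootstrap that enables data checksums and
+                # runs post-init SQL from a Secret. The names "app", "app-owner"
+                # and "post-init-sql" are hypothetical.
+                #
+                #   spec:
+                #     bootstrap:
+                #       initdb:
+                #         database: app
+                #         owner: app-owner
+                #         dataChecksums: true
+                #         postInitApplicationSQLRefs:
+                #           secretRefs:
+                #             - name: post-init-sql
+                #               key: schema.sql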
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
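+                # Illustrative sketch (comment only, not part of the generated
+                # schema): point-in-time recovery from an external cluster's
+                # object store, using the `recoveryTarget` fields above. The
+                # source name, bucket path, and timestamp are hypothetical.
+                #
+                #   spec:
+                #     bootstrap:
+                #       recovery:
+                #         source: origin-cluster
+                #         recoveryTarget:
+                #           targetTime: "2024-05-01T10:00:00Z"
+                #     externalClusters:
+                #       - name: origin-cluster
+                #         barmanObjectStore:
+                #           destinationPath: s3://my-bucket/backups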
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
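+                # Illustrative sketch (comment only, not part of the generated
+                # schema): user-provided certificates following the descriptions
+                # above. All secret names are hypothetical and must exist in the
+                # cluster's namespace.
+                #
+                #   spec:
+                #     certificates:
+                #       serverCASecret: server-ca
+                #       serverTLSSecret: server-tls
+                #       clientCASecret: client-ca
+                #       replicationTLSSecret: streaming-replica-tls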
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1".
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
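+                # Illustrative sketch (comment only, not part of the generated
+                # schema): an ephemeral volume backed by a PVC template, as
+                # described above. The storage class name "standard" is
+                # hypothetical.
+                #
+                #   spec:
+                #     ephemeralVolumeSource:
+                #       volumeClaimTemplate:
+                #         spec:
+                #           accessModes: ["ReadWriteOnce"]
+                #           storageClassName: standard
+                #           resources:
+                #             requests:
+                #               storage: 1Gi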
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim.
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files. + When not defined, base backup files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution.
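+                # Illustrative sketch (comment only, not part of the generated
+                # schema): Azure Blob Storage credentials for a barman object
+                # store, as described above. The account, container, and secret
+                # names are hypothetical.
+                #
+                #   barmanObjectStore:
+                #     destinationPath: https://myaccount.blob.core.windows.net/backups
+                #     azureCredentials:
+                #       storageAccount:
+                #         name: azure-creds
+                #         key: ACCOUNT_NAME
+                #       storageKey:
+                #         name: azure-creds
+                #         key: ACCOUNT_KEY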
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (e.g. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + defaults to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key-value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key-value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution.
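+                # Illustrative sketch (comment only, not part of the generated
+                # schema): an external cluster entry pointing at a recovery
+                # object store, with parallel WAL fetching as described above.
+                # Names and the bucket path are hypothetical.
+                #
+                #   spec:
+                #     externalClusters:
+                #       - name: origin-cluster
+                #         barmanObjectStore:
+                #           destinationPath: s3://my-bucket/backups
+                #           serverName: origin-cluster
+                #           wal:
+                #             maxParallel: 4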
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
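+      # Editor's note - an illustrative sketch, not generated schema. Selecting
+      # the PostgreSQL major version through an ImageCatalog instead of pinning
+      # `imageName`; the catalog name `postgresql` is a hypothetical placeholder.
+      # Per the CEL validations above, `kind` must be ImageCatalog or
+      # ClusterImageCatalog and `apiGroup` must be postgresql.cnpg.io.
+      #
+      #   imageCatalogRef:
+      #     apiGroup: postgresql.cnpg.io
+      #     kind: ImageCatalog
+      #     name: postgresql
+      #     major: 16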
+                  type: string
+              required:
+              - name
+              type: object
+            type: array
+          inheritedMetadata:
+            description: Metadata that will be inherited by all objects related
+              to the Cluster
+            properties:
+              annotations:
+                additionalProperties:
+                  type: string
+                type: object
+              labels:
+                additionalProperties:
+                  type: string
+                type: object
+            type: object
+          instances:
+            default: 1
+            description: Number of instances required in the cluster
+            minimum: 1
+            type: integer
+          livenessProbeTimeout:
+            description: |-
+              LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+              to successfully respond to the liveness probe (default 30).
+              The Liveness probe failure threshold is derived from this value using the formula:
+              ceiling(livenessProbeTimeout / 10).
+            format: int32
+            type: integer
+          logLevel:
+            default: info
+            description: 'The instances'' log level, one of the following values:
+              error, warning, info (default), debug, trace'
+            enum:
+            - error
+            - warning
+            - info
+            - debug
+            - trace
+            type: string
+          managed:
+            description: The configuration that is used by the portions of PostgreSQL
+              that are managed by the instance manager
+            properties:
+              roles:
+                description: Database roles managed by the `Cluster`
+                items:
+                  description: |-
+                    RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+                    with the additional field Ensure specifying whether to ensure the presence or
+                    absence of the role in the database
+
+                    The defaults of the CREATE ROLE command are applied
+                    Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+                  properties:
+                    bypassrls:
+                      description: |-
+                        Whether a role bypasses every row-level security (RLS) policy.
+                        Default is `false`.
+                      type: boolean
+                    comment:
+                      description: Description of the role
+                      type: string
+                    connectionLimit:
+                      default: -1
+                      description: |-
+                        If the role can log in, this specifies how many concurrent
+                        connections the role can make. `-1` (the default) means no limit.
+                      format: int64
+                      type: integer
+                    createdb:
+                      description: |-
+                        When set to `true`, the role being defined will be allowed to create
+                        new databases. Specifying `false` (default) will deny a role the
+                        ability to create databases.
+                      type: boolean
+                    createrole:
+                      description: |-
+                        Whether the role will be permitted to create, alter, drop, comment
+                        on, change the security label for, and grant or revoke membership in
+                        other roles. Default is `false`.
+                      type: boolean
+                    disablePassword:
+                      description: DisablePassword indicates that a role's password
+                        should be set to NULL in Postgres
+                      type: boolean
+                    ensure:
+                      default: present
+                      description: Ensure the role is `present` or `absent` -
+                        defaults to "present"
+                      enum:
+                      - present
+                      - absent
+                      type: string
+                    inRoles:
+                      description: |-
+                        List of one or more existing roles to which this role will be
+                        immediately added as a new member. Default empty.
+                      items:
+                        type: string
+                      type: array
+                    inherit:
+                      default: true
+                      description: |-
+                        Whether a role "inherits" the privileges of roles it is a member of.
+                        Default is `true`.
+                      type: boolean
+                    login:
+                      description: |-
+                        Whether the role is allowed to log in. A role having the `login`
+                        attribute can be thought of as a user. Roles without this attribute
+                        are useful for managing database privileges, but are not users in
+                        the usual sense of the word. Default is `false`.
+                      type: boolean
+                    name:
+                      description: Name of the role
+                      type: string
+                    passwordSecret:
+                      description: |-
+                        Secret containing the password of the role (if present)
+                        If null, the password will be ignored unless DisablePassword is set
+                      properties:
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    replication:
+                      description: |-
+                        Whether a role is a replication role. A role must have this
+                        attribute (or be a superuser) in order to be able to connect to the
+                        server in replication mode (physical or logical replication) and in
+                        order to be able to create or drop replication slots. A role having
+                        the `replication` attribute is a very highly privileged role, and
+                        should only be used on roles actually used for replication. Default
+                        is `false`.
+                      type: boolean
+                    superuser:
+                      description: |-
+                        Whether the role is a `superuser` who can override all access
+                        restrictions within the database - superuser status is dangerous and
+                        should be used only when really needed. You must yourself be a
+                        superuser to create a new superuser. Default is `false`.
+                      type: boolean
+                    validUntil:
+                      description: |-
+                        Date and time after which the role's password is no longer valid.
+                        When omitted, the password will never expire (default).
+                      format: date-time
+                      type: string
+                  required:
+                  - name
+                  type: object
+                type: array
+              services:
+                description: Services managed by the `Cluster`
+                properties:
+                  additional:
+                    description: Additional is a list of additional managed services
+                      specified by the user.
+                    items:
+                      description: |-
+                        ManagedService represents a specific service managed by the cluster.
+                        It includes the type of service and its associated template specification.
+                      properties:
+                        selectorType:
+                          description: |-
+                            SelectorType specifies the type of selectors that the service will have.
+                            Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+                          enum:
+                          - rw
+                          - r
+                          - ro
+                          type: string
+                        serviceTemplate:
+                          description: ServiceTemplate is the template specification
+                            for the service.
+                          properties:
+                            metadata:
+                              description: |-
+                                Standard object's metadata.
+                                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                              properties:
+                                annotations:
+                                  additionalProperties:
+                                    type: string
+                                  description: |-
+                                    Annotations is an unstructured key value map stored with a resource that may be
+                                    set by external tools to store and retrieve arbitrary metadata. They are not
+                                    queryable and should be preserved when modifying objects.
+                                    More info: http://kubernetes.io/docs/user-guide/annotations
+                                  type: object
+                                labels:
+                                  additionalProperties:
+                                    type: string
+                                  description: |-
+                                    Map of string keys and values that can be used to organize and categorize
+                                    (scope and select) objects. May match selectors of replication controllers
+                                    and services.
+                                    More info: http://kubernetes.io/docs/user-guide/labels
+                                  type: object
+                                name:
+                                  description: The name of the resource. Only
+                                    supported for certain types
+                                  type: string
+                              type: object
+                            spec:
+                              description: |-
+                                Specification of the desired behavior of the service.
+                                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                              properties:
+                                allocateLoadBalancerNodePorts:
+                                  description: |-
+                                    allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                                    allocated for services with type LoadBalancer. Default is "true". It
+                                    may be set to "false" if the cluster load-balancer does not rely on
+                                    NodePorts. If the caller requests specific NodePorts (by specifying a
+                                    value), those requests will be respected, regardless of this field.
+                                    This field may only be set for services with type LoadBalancer and will
+                                    be cleared if the type is changed to any other type.
+                                  type: boolean
+                                clusterIP:
+                                  description: |-
+                                    clusterIP is the IP address of the service and is usually assigned
+                                    randomly. If an address is specified manually, is in-range (as per
+                                    system configuration), and is not in use, it will be allocated to the
+                                    service; otherwise creation of the service will fail. This field may not
+                                    be changed through updates unless the type field is also being changed
+                                    to ExternalName (which requires this field to be blank) or the type
+                                    field is being changed from ExternalName (in which case this field may
+                                    optionally be specified, as described above). Valid values are "None",
+                                    empty string (""), or a valid IP address. Setting this to "None" makes a
+                                    "headless service" (no virtual IP), which is useful when direct endpoint
+                                    connections are preferred and proxying is not required. Only applies to
+                                    types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                                    when creating a Service of type ExternalName, creation will fail. This
+                                    field will be wiped when updating a Service to type ExternalName.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  type: string
+                                clusterIPs:
+                                  description: |-
+                                    ClusterIPs is a list of IP addresses assigned to this service, and are
+                                    usually assigned randomly. If an address is specified manually, is
+                                    in-range (as per system configuration), and is not in use, it will be
+                                    allocated to the service; otherwise creation of the service will fail.
+                                    This field may not be changed through updates unless the type field is
+                                    also being changed to ExternalName (which requires this field to be
+                                    empty) or the type field is being changed from ExternalName (in which
+                                    case this field may optionally be specified, as described above). Valid
+                                    values are "None", empty string (""), or a valid IP address. Setting
+                                    this to "None" makes a "headless service" (no virtual IP), which is
+                                    useful when direct endpoint connections are preferred and proxying is
+                                    not required. Only applies to types ClusterIP, NodePort, and
+                                    LoadBalancer. If this field is specified when creating a Service of type
+                                    ExternalName, creation will fail. This field will be wiped when updating
+                                    a Service to type ExternalName. If this field is not specified, it will
+                                    be initialized from the clusterIP field. If this field is specified,
+                                    clients must ensure that clusterIPs[0] and clusterIP have the same
+                                    value.
+
+                                    This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                                    These IPs must correspond to the values of the ipFamilies field. Both
+                                    clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                externalIPs:
+                                  description: |-
+                                    externalIPs is a list of IP addresses for which nodes in the cluster
+                                    will also accept traffic for this service. These IPs are not managed by
+                                    Kubernetes. The user is responsible for ensuring that traffic arrives
+                                    at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+                                    This field will be wiped when updating a Service to type ExternalName.
+
+                                    This field may hold a maximum of two entries (dual-stack families, in
+                                    either order). These families must correspond to the values of the
+                                    clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                    governed by the ipFamilyPolicy field.
+                                  items:
+                                    description: |-
+                                      IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                      to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                ipFamilyPolicy:
+                                  description: |-
+                                    IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                    this Service. If there is no value provided, then this field will be set
+                                    to SingleStack. Services can be "SingleStack" (a single IP family),
+                                    "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                    a single IP family on single-stack clusters), or "RequireDualStack"
+                                    (two IP families on dual-stack configured clusters, otherwise fail). The
+                                    ipFamilies and clusterIPs fields depend on the value of this field. This
+                                    field will be wiped when updating a service to type ExternalName.
+                                  type: string
+                                loadBalancerClass:
+                                  description: |-
+                                    loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                    If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                    e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                    This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                    balancer implementation is used, today this is typically done through the cloud provider integration,
+                                    but should apply for any default implementation. If set, it is assumed that a load balancer
+                                    implementation is watching for Services with a matching class. Any default load balancer
+                                    implementation (e.g. cloud providers) should ignore Services that set this field.
+                                    This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                    Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                  type: string
+                                loadBalancerIP:
+                                  description: |-
+                                    Only applies to Service Type: LoadBalancer.
+                                    This feature depends on whether the underlying cloud-provider supports specifying
+                                    the loadBalancerIP when a load balancer is created.
+                                    This field will be ignored if the cloud-provider does not support the feature.
+                                    Deprecated: This field was under-specified and its meaning varies across implementations.
+                                    Using it is non-portable and it may not support dual-stack.
+                                    Users are encouraged to use implementation-specific annotations when available.
+                                  type: string
+                                loadBalancerSourceRanges:
+                                  description: |-
+                                    If specified and supported by the platform, traffic through the cloud-provider
+                                    load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                                    cloud-provider does not support the feature.
+                                    More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                ports:
+                                  description: |-
+                                    The list of ports that are exposed by this service.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  items:
+                                    description: ServicePort contains information
+                                      on service's port.
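+      # Editor's note - an illustrative sketch, not generated schema. An
+      # additional managed service exposing the read-write endpoint through a
+      # LoadBalancer, built from the `selectorType` and `serviceTemplate` fields
+      # above; the service name is a hypothetical placeholder.
+      #
+      #   managed:
+      #     services:
+      #       additional:
+      #         - selectorType: rw
+      #           serviceTemplate:
+      #             metadata:
+      #               name: cluster-example-rw-lb
+      #             spec:
+      #               type: LoadBalancer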
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
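+      # Editor's note - an illustrative sketch, not generated schema. Enabling
+      # the PodMonitor and dropping one metric family before ingestion via
+      # `podMonitorMetricRelabelings`; the regex is an example only.
+      #
+      #   monitoring:
+      #     enablePodMonitor: true
+      #     podMonitorMetricRelabelings:
+      #       - sourceLabels: ["__name__"]
+      #         regex: "cnpg_collector_.*"
+      #         action: drop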
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. 
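+      # Editor's note - an illustrative sketch, not generated schema. Tuning
+      # server settings through `parameters` and appending a `pg_hba` rule;
+      # the values are examples only.
+      #
+      #   postgresql:
+      #     parameters:
+      #       shared_buffers: "512MB"
+      #       max_connections: "200"
+      #     pg_hba:
+      #       - hostssl app app 10.0.0.0/8 scram-sha-256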
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
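+      # Editor's note - an illustrative sketch, not generated schema.
+      # Quorum-based synchronous replication that waits for one synchronous
+      # standby, using the `method`, `number`, and `dataDurability` fields
+      # defined above.
+      #
+      #   postgresql:
+      #     synchronous:
+      #       method: any
+      #       number: 1
+      #       dataDurability: required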
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
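+      # Editor's note - an illustrative sketch, not generated schema. Switching
+      # the readiness probe to the `streaming` strategy with a lag threshold and
+      # relaxing the startup probe; the values are examples only.
+      #
+      #   probes:
+      #     readiness:
+      #       type: streaming
+      #       maximumLag: 32Mi
+      #     startup:
+      #       failureThreshold: 30
+      #       periodSeconds: 10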
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. A replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica clusters, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`.
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined, a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate.
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs; if ServerTLSSecret is provided, + this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must also provide `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash of the operator that is running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
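# Illustrative excerpt of how the status fields above may look on a live
# Cluster; all values are made up, following the usual <cluster>-<n> naming:
#
#   status:
#     image: ghcr.io/cloudnative-pg/postgresql:17.2
#     instances: 3
#     instanceNames:
#       - cluster-example-1
#       - cluster-example-2
#       - cluster-example-3
#     healthyPVC:
#       - cluster-example-1
#       - cluster-example-2
#       - cluster-example-3
#     firstRecoverabilityPoint: "2025-01-01T00:00:00Z"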
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. 
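# Illustrative excerpt (made-up values): pgDataImageInfo records the last
# image that ran on the current data directory, e.g.:
#
#   status:
#     pgDataImageInfo:
#       image: ghcr.io/cloudnative-pg/postgresql:17.2
#       majorVersion: 17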
+ properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. 
+ type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. + type: boolean + type: object + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance; this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extracted.
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
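# Illustrative sketch, not part of the generated schema: a Database resource
# combining the fields of this spec (the required `cluster`, `name`, and
# `owner` appear below). Cluster, database, owner, and extension names are
# assumptions:
#
#   apiVersion: postgresql.cnpg.io/v1
#   kind: Database
#   metadata:
#     name: app-db
#   spec:
#     cluster:
#       name: cluster-example
#     name: app
#     owner: app
#     extensions:
#       - name: pg_stat_statements
#         ensure: present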
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + extensions: + description: Extensions is the status of the managed extensions + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True if the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + schemas: + description: Schemas is the status of the managed schemas + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True if the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
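# Illustrative sketch, not part of the generated schema: an ImageCatalog with
# one entry per PostgreSQL major version; the image tags are assumptions:
#
#   apiVersion: postgresql.cnpg.io/v1
#   kind: ImageCatalog
#   metadata:
#     name: postgresql
#   spec:
#     images:
#       - major: 16
#         image: ghcr.io/cloudnative-pg/postgresql:16.6
#       - major: 17
#         image: ghcr.io/cloudnative-pg/postgresql:17.2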
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
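# Illustrative fragment of a Pooler spec using the monitoring fields above:
# enable the PodMonitor and drop a (hypothetical) family of samples before
# ingestion:
#
#   monitoring:
#     enablePodMonitor: true
#     podMonitorMetricRelabelings:
#       - sourceLabels: [__name__]
#         regex: "pgbouncer_example_.*"
#         action: drop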
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
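# Illustrative sketch, not part of the generated schema: a minimal Pooler in
# front of a Cluster. Names, instance count, and the PgBouncer parameter value
# are assumptions; note the Pooler name must not match the cluster name:
#
#   apiVersion: postgresql.cnpg.io/v1
#   kind: Pooler
#   metadata:
#     name: pooler-example-rw
#   spec:
#     cluster:
#       name: cluster-example
#     instances: 2
#     type: rw
#     pgbouncer:
#       poolMode: transaction
#       parameters:
#         max_client_conn: "500"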
+ type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. 
If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. 
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. 
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required.
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred.
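+ # A minimal sketch (illustrative only, not part of the generated schema):
+ # expressed in a Pod template, a weighted pod-affinity preference like the one
+ # described above could look like the following, where the app=example-app
+ # label and the weight of 50 are hypothetical choices:
+ #
+ #   affinity:
+ #     podAffinity:
+ #       preferredDuringSchedulingIgnoredDuringExecution:
+ #         - weight: 50
+ #           podAffinityTerm:
+ #             labelSelector:
+ #               matchLabels:
+ #                 app: example-app
+ #             topologyKey: kubernetes.io/hostname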
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
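+ # A minimal sketch (illustrative only, not part of the generated schema): a
+ # required co-location rule as described above, scheduling the pod only onto
+ # nodes in the same zone as pods carrying the hypothetical app=example-cache
+ # label:
+ #
+ #   affinity:
+ #     podAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         - labelSelector:
+ #             matchLabels:
+ #               app: example-cache
+ #           topologyKey: topology.kubernetes.io/zone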
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key topologyKey matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key topologyKey matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1".
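+ # A minimal sketch (illustrative only, not part of the generated schema): the
+ # fieldRef source above enables the Downward API pattern, e.g. exposing the
+ # pod's own name to the container; POD_NAME is a hypothetical variable name:
+ #
+ #   env:
+ #     - name: POD_NAME
+ #       valueFrom:
+ #         fieldRef:
+ #           fieldPath: metadata.name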
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
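+ # A minimal sketch (illustrative only, not part of the generated schema): a
+ # preStop handler using the Sleep action described above to delay termination;
+ # the five-second duration is an arbitrary example:
+ #
+ #   lifecycle:
+ #     preStop:
+ #       sleep:
+ #         seconds: 5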
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
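+ # A minimal sketch (illustrative only, not part of the generated schema): an
+ # HTTP liveness probe built from the fields above; the /healthz path, port
+ # 8080, and the timing values are hypothetical:
+ #
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     initialDelaySeconds: 10
+ #     periodSeconds: 10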
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
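+ # A minimal sketch (illustrative only, not part of the generated schema):
+ # declaring a named container port that a Service's targetPort can then
+ # reference by name; "metrics" and 9187 are hypothetical values:
+ #
+ #   ports:
+ #     - name: metrics
+ #       containerPort: 9187
+ #       protocol: TCP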
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+ mitigating container breakout vulnerabilities even while allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max
+ of that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be a non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated.
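+ # Illustrative comment only, not part of the generated schema: a minimal volumeMounts entry as it could appear in a pod template, assuming a volume named "cache" is declared under spec.volumes.
+ #   volumeMounts:
+ #     - name: cache
+ #       mountPath: /var/cache/app
+ #       readOnly: true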
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + - securityContext.windowsOptions + + If the OS field is set to windows, the following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional values may be defined in the future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied.
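+ # Illustrative comment only: a pod-level seccomp profile sketch. RuntimeDefault applies the container runtime's default profile; a Localhost profile would instead require a profile file preconfigured on the node.
+ #   securityContext:
+ #     seccompProfile:
+ #       type: RuntimeDefault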
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be a non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string.
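+ # Illustrative comment only: a toleration matching a hypothetical taint "dedicated=postgres:NoSchedule"; with operator Exists the value would be left empty.
+ #   tolerations:
+ #     - key: dedicated
+ #       operator: Equal
+ #       value: postgres
+ #       effect: NoSchedule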
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field.
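+ # Illustrative comment only: one even-spread constraint across zones for pods carrying a hypothetical "app: example" label.
+ #   topologySpreadConstraints:
+ #     - maxSkew: 1
+ #       topologyKey: topology.kubernetes.io/zone
+ #       whenUnsatisfiable: DoNotSchedule
+ #       labelSelector:
+ #         matchLabels:
+ #           app: example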
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specifies whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
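The gitRepo deprecation note above prescribes a replacement pattern rather than a drop-in field, so a concrete manifest may help. The sketch below is an editorial illustration, not part of the generated CRD: an init container clones the repository into an emptyDir volume, which the main container then mounts read-only. The repository URL, image tags, and all names are assumptions for the example.

```yaml
# Illustrative only: the emptyDir + init container pattern that replaces
# the deprecated gitRepo volume type.
apiVersion: v1
kind: Pod
metadata:
  name: git-clone-example          # hypothetical name
spec:
  initContainers:
    - name: clone-repo
      image: alpine/git:latest     # any image that ships git
      args: ["clone", "--depth=1", "https://example.com/repo.git", "/repo"]
      volumeMounts:
        - name: repo
          mountPath: /repo
  containers:
    - name: app
      image: busybox:stable
      command: ["sh", "-c", "ls /repo && sleep 3600"]
      volumeMounts:
        - name: repo
          mountPath: /repo
          readOnly: true           # the clone finishes before this container starts
  volumes:
    - name: repo
      emptyDir: {}                 # replaces the deprecated gitRepo volume
```

Because init containers run to completion before the main containers start, the repository contents are guaranteed to be in place when `app` mounts the volume.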
+ type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects.
Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether iSCSI + Discovery CHAP authentication is supported + type: boolean + chapAuthSession: + description: chapAuthSession defines whether iSCSI + Session CHAP authentication is supported + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal.
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to service account user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
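Since the Publication spec above maps almost one-to-one onto PostgreSQL's `CREATE PUBLICATION`, a minimal example object may be easier to read than the schema alone. This is an editorial sketch; the cluster, database, and publication names below are illustrative assumptions:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Publication
metadata:
  name: pub-app                    # hypothetical object name
spec:
  cluster:
    name: cluster-example          # the "publisher" Cluster
  dbname: app                      # immutable after creation
  name: app_pub                    # publication name inside PostgreSQL
  publicationReclaimPolicy: retain
  target:
    objects:
      - tablesInSchema: public     # FOR TABLES IN SCHEMA public
```

Setting `target.allTables: true` instead corresponds to `FOR ALL TABLES`; the CEL validations that follow reject objects that set both `allTables` and `objects`, or both `table` and `tablesInSchema` in the same entry.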
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to be immediately start after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup. 
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule + format: date-time + type: string + lastScheduleTime: + description: Information when was the last time that backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
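
Note the schedule format called out above: unlike Kubernetes CronJobs, it is a six-field robfig/cron expression with a leading seconds specifier. A sketch of a nightly backup at 02:00, with placeholder names:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: backup-nightly
spec:
  schedule: "0 0 2 * * *"       # seconds minutes hours day-of-month month day-of-week
  cluster:
    name: cluster-example       # placeholder
  backupOwnerReference: self    # Backup objects are garbage-collected with this resource
  immediate: true               # also take one backup right after creation
```
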
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
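
The Subscription mirrors the Publication on the subscriber side. Based on the field description above, `externalClusterName` is assumed to match an `externalClusters` entry defined on the subscriber Cluster. A sketch with placeholder names, pairing with the Publication example earlier:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Subscription
metadata:
  name: subscription-example
spec:
  cluster:
    name: destination-cluster   # placeholder: the "subscriber" Cluster
  dbname: app                   # placeholder database on the subscriber
  name: sub_all                 # subscription name inside PostgreSQL
  externalClusterName: source-cluster   # placeholder: external cluster with the publication
  publicationName: pub_all      # must match the publication on the publisher
```
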
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
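
The editor/viewer ClusterRoles above are plain RBAC building blocks intended to be bound to human or CI principals. For namespace-scoped access, a RoleBinding referencing the ClusterRole is enough; the subject and namespace below are placeholders:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: publication-editors
  namespace: default            # grants rights only in this namespace
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cnpg-publication-editor-role
subjects:
  - kind: User
    name: alice                 # placeholder principal
    apiGroup: rbac.authorization.k8s.io
```
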
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
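
The `cnpg-default-monitoring` ConfigMap that closes above doubles as a template for user-defined metrics: each top-level key under `queries` pairs a SQL `query` with a `metrics` list mapping output columns to LABEL/GAUGE/COUNTER. A sketch of a custom ConfigMap in the same format (the query and names are illustrative; a Cluster picks it up through its monitoring configuration, which is outside this manifest):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: custom-monitoring       # placeholder name
  namespace: default            # placeholder: same namespace as the referencing Cluster
  labels:
    cnpg.io/reload: ""          # same reload label used above
data:
  queries: |
    cache_hit:
      query: |
        SELECT sum(blks_hit) * 100.0 / NULLIF(sum(blks_hit) + sum(blks_read), 0) AS ratio
        FROM pg_catalog.pg_stat_database
        WHERE datname = current_database()
      metrics:
        - ratio:
            usage: "GAUGE"
            description: "Buffer cache hit ratio for the current database"
```
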
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - 
apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.26.1.yaml b/releases/cnpg-1.26.1.yaml new file mode 100644 index 0000000000..e99338fe1a --- /dev/null +++ b/releases/cnpg-1.26.1.yaml @@ -0,0 +1,18034 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
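
The controller Deployment earlier in this manifest ships conservative defaults (1 replica, 100m CPU / 200Mi memory limits). If the release file is consumed as a kustomize base, these can be raised without forking it; a sketch, assuming the kustomization sits at the repository root and with the values as placeholders:

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - releases/cnpg-1.26.1.yaml   # the file added by this diff
patches:
  - target:
      kind: Deployment
      name: cnpg-controller-manager
    patch: |-
      - op: replace
        path: /spec/template/spec/containers/0/resources/limits/memory
        value: 512Mi
      - op: replace
        path: /spec/replicas
        value: 2                # leader election keeps one active manager
```
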
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. 
+ `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
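
On-demand backups use the same knobs as ScheduledBackup minus the schedule. A sketch requesting a cold volume snapshot taken preferably from a standby (names are placeholders):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-ondemand
spec:
  cluster:
    name: cluster-example       # placeholder
  method: volumeSnapshot
  online: false                 # offline/cold snapshot
  target: prefer-standby        # run on the most up-to-date standby if available
```
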
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is tho role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
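
A ClusterImageCatalog is cluster-scoped and simply lists one image per PostgreSQL major version (majors must be unique, between 1 and 8 entries, and at least 10). A sketch; the tags are illustrative, not pinned release versions:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ClusterImageCatalog
metadata:
  name: postgresql              # placeholder catalog name
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16.6   # illustrative tag
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17.2   # illustrative tag
```
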
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods.
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values.
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a map of key-value pairs used to define the nodes on which + the pods can run. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category.
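# NOTE (editorial example, not part of the generated CRD): a sketch of the
# Cluster scheduling fields described above. `enablePodAntiAffinity` and
# `podAntiAffinityType` drive the anti-affinity terms generated by the
# operator, while `nodeSelector`, `tolerations`, and `topologyKey` map to the
# standard Kubernetes scheduling primitives. Label keys, values, and the
# taint are hypothetical.
#
#   spec:
#     affinity:
#       enablePodAntiAffinity: true
#       podAntiAffinityType: required  # pods stay Pending if no node satisfies the rule
#       topologyKey: kubernetes.io/hostname
#       nodeSelector:
#         workload.example.com/postgres: "true"
#       tolerations:
#         - key: postgres
#           operator: Equal
#           value: "true"
#           effect: NoSchedule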
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files. + When not defined, base backup files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`.
+ enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder); + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + defaults to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3; the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method.
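# NOTE (editorial example, not part of the generated CRD): a sketch of the
# `backup` stanza described above, combining S3-style credentials, gzip
# compression for base backups and WALs, and a 30-day retention policy
# (`30d` matches the `^[1-9][0-9]*[dwm]$` pattern that follows). Bucket,
# endpoint, and secret names are placeholders.
#
#   spec:
#     backup:
#       retentionPolicy: "30d"
#       barmanObjectStore:
#         destinationPath: s3://backup-bucket/cluster-example
#         endpointURL: https://s3.example.com
#         s3Credentials:
#           accessKeyId:
#             name: backup-creds
#             key: ACCESS_KEY_ID
#           secretAccessKey:
#             name: backup-creds
#             key: ACCESS_SECRET_KEY
#         data:
#           compression: gzip
#           jobs: 2
#         wal:
#           compression: gzip
#           maxParallel: 4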
+ pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. + type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + It defaults to the PGDATA Snapshot Class, if set. + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used.
+ This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. + type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after it is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations; + please use the explicitly provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. 
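# NOTE (editorial example, not part of the generated CRD): a sketch of
# `bootstrap.initdb` honoring the CEL validations above: `icuLocale` is only
# accepted when `localeProvider` is `icu`. Database, owner, and locale values
# are illustrative; `postInitApplicationSQL` runs as a superuser right after
# the cluster is created, so it should be used with care.
#
#   spec:
#     bootstrap:
#       initdb:
#         database: app
#         owner: app
#         dataChecksums: true
#         walSegmentSize: 32
#         localeProvider: icu
#         icuLocale: en-US
#         postInitApplicationSQL:
#           - CREATE EXTENSION IF NOT EXISTS pg_stat_statements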
+ type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. 
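As a sketch of the `recovery` bootstrap and its `recoveryTarget` fields described above (a point-in-time recovery from an object store; every name below is illustrative, and `volumeSnapshots` could be used instead of `backup`/`source` for snapshot-based recovery):

    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: cluster-restore
    spec:
      instances: 3
      storage:
        size: 1Gi
      bootstrap:
        recovery:
          source: cluster-origin               # must match an externalClusters entry
          recoveryTarget:
            targetTime: "2024-05-01T10:00:00Z" # RFC3339 timestamp
            exclusive: false                   # recovery_target_inclusive stays true
      externalClusters:
        - name: cluster-origin
          barmanObjectStore:
            destinationPath: s3://backups/cluster-origin   # hypothetical bucket
            s3Credentials:
              inheritFromIAMRole: true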
Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
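A sketch of the `certificates` stanza described above, with hypothetical secret names; each CA secret must carry `ca.crt`, plus `ca.key` whenever the operator has to generate the corresponding leaf certificate:

    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: cluster-custom-tls
    spec:
      instances: 3
      storage:
        size: 1Gi
      certificates:
        serverCASecret: my-server-ca             # hypothetical
        serverTLSSecret: my-server-tls           # kubernetes.io/tls Secret
        clientCASecret: my-client-ca             # hypothetical
        replicationTLSSecret: my-replication-tls # for the streaming_replica user
        serverAltDNSNames:
          - db.example.com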
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name of each environment + variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. 
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner.
+ This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim.
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution.
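Putting the `barmanObjectStore` fields above together, a sketch of an external cluster pointing at an S3-compatible store (the bucket, endpoint, and Secret names are all hypothetical):

    externalClusters:
      - name: cluster-origin
        barmanObjectStore:
          destinationPath: s3://my-bucket/backups
          endpointURL: https://s3.example.com   # only when overriding endpoint discovery
          s3Credentials:
            accessKeyId:
              name: aws-creds                   # hypothetical Secret
              key: ACCESS_KEY_ID
            secretAccessKey:
              name: aws-creds
              key: ACCESS_SECRET_KEY
          wal:
            compression: gzip
            maxParallel: 4                      # archive/restore up to 4 WALs at a time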
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
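Per the `imageCatalogRef` validations above (the kind must be `ImageCatalog` or `ClusterImageCatalog` in the `postgresql.cnpg.io` group), version selection through a catalog looks roughly like this, with a hypothetical catalog name:

    spec:
      imageCatalogRef:
        apiGroup: postgresql.cnpg.io
        kind: ClusterImageCatalog
        name: postgresql   # hypothetical catalog name
        major: 16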
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Default is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`.
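A sketch of a declarative role using the `managed.roles` attributes documented here (the role and Secret names are hypothetical; unset attributes follow the CREATE ROLE defaults):

    spec:
      managed:
        roles:
          - name: dante            # hypothetical role
            ensure: present
            comment: application reporting user
            login: true
            inherit: true
            connectionLimit: 4
            inRoles:
              - pg_monitor
            passwordSecret:
              name: dante-password # hypothetical Secret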
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Default is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true".
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as described above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as described above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature. + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port.
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+                                        More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                                      type: string
+                                  type: object
+                              type: object
+                            updateStrategy:
+                              default: patch
+                              description: UpdateStrategy describes how the service
+                                differences should be reconciled
+                              enum:
+                              - patch
+                              - replace
+                              type: string
+                          required:
+                          - selectorType
+                          - serviceTemplate
+                          type: object
+                        type: array
+                      disabledDefaultServices:
+                        description: |-
+                          DisabledDefaultServices is a list of default service types to be disabled.
+                          Valid values are "r" and "ro", representing the read and read-only services.
+                        items:
+                          description: |-
+                            ServiceSelectorType describes a valid value for generating the service selectors.
+                            It indicates which type of service the selector applies to, such as read-write, read, or read-only
+                          enum:
+                          - rw
+                          - r
+                          - ro
+                          type: string
+                        type: array
+                    type: object
+                type: object
+              maxSyncReplicas:
+                default: 0
+                description: |-
+                  The target value for the synchronous replication quorum, which can be
+                  decreased if the number of ready standbys is lower than this.
+                  Undefined or 0 disables synchronous replication.
+                minimum: 0
+                type: integer
+              minSyncReplicas:
+                default: 0
+                description: |-
+                  Minimum number of instances required in synchronous replication with the
+                  primary. Undefined or 0 allows writes to complete when no standby is
+                  available.
+                minimum: 0
+                type: integer
+              monitoring:
+                description: The configuration of the monitoring infrastructure of
+                  this cluster
+                properties:
+                  customQueriesConfigMap:
+                    description: The list of config maps containing the custom queries
+                    items:
+                      description: |-
+                        ConfigMapKeySelector contains enough information to let you locate
+                        the key of a ConfigMap
+                      properties:
+                        key:
+                          description: The key to select
+                          type: string
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                      - key
+                      - name
+                      type: object
+                    type: array
+                  customQueriesSecret:
+                    description: The list of secrets containing the custom queries
+                    items:
+                      description: |-
+                        SecretKeySelector contains enough information to let you locate
+                        the key of a Secret
+                      properties:
+                        key:
+                          description: The key to select
+                          type: string
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                      - key
+                      - name
+                      type: object
+                    type: array
+                  disableDefaultQueries:
+                    default: false
+                    description: |-
+                      Whether the default queries should be injected.
+                      Set it to `true` if you don't want to inject default queries into the cluster.
+                      Default: false.
+                    type: boolean
+                  enablePodMonitor:
+                    default: false
+                    description: Enable or disable the `PodMonitor`
+                    type: boolean
+                  podMonitorMetricRelabelings:
+                    description: The list of metric relabelings for the `PodMonitor`.
+                      Applied to samples before ingestion.
+                    items:
+                      description: |-
+                        RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+                        scraped samples and remote write samples.
+
+                        More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+                      properties:
+                        action:
+                          default: replace
+                          description: |-
+                            Action to perform based on the regex matching.
+
+                            `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+                            `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
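+              # Illustrative only: enabling the PodMonitor and loading custom metric
+              # queries from a hypothetical ConfigMap named `example-monitoring`.
+              #
+              #   monitoring:
+              #     enablePodMonitor: true
+              #     customQueriesConfigMap:
+              #       - name: example-monitoring
+              #         key: custom-queries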
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
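+              # Illustrative only: a relabeling entry that drops samples whose metric
+              # name matches a hypothetical pattern, before ingestion.
+              #
+              #   monitoring:
+              #     podMonitorMetricRelabelings:
+              #       - sourceLabels: [__name__]
+              #         regex: cnpg_collector_.*
+              #         action: drop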
+                            pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+                            type: string
+                          type: array
+                        targetLabel:
+                          description: |-
+                            Label to which the resulting string is written in a replacement.
+
+                            It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+                            `KeepEqual` and `DropEqual` actions.
+
+                            Regex capture groups are available.
+                          type: string
+                      type: object
+                    type: array
+                  tls:
+                    description: |-
+                      Configure TLS communication for the metrics endpoint.
+                      Changing tls.enabled option will force a rollout of all instances.
+                    properties:
+                      enabled:
+                        default: false
+                        description: |-
+                          Enable TLS for the monitoring endpoint.
+                          Changing this option will force a rollout of all instances.
+                        type: boolean
+                    type: object
+                type: object
+              nodeMaintenanceWindow:
+                description: Define a maintenance window for the Kubernetes nodes
+                properties:
+                  inProgress:
+                    default: false
+                    description: Is there a node maintenance activity in progress?
+                    type: boolean
+                  reusePVC:
+                    default: true
+                    description: |-
+                      Reuse the existing PVC (wait for the node to come
+                      up again) or not (recreate it elsewhere - when `instances` >1)
+                    type: boolean
+                type: object
+              plugins:
+                description: |-
+                  The plugins configuration, containing
+                  any plugin to be loaded with the corresponding configuration
+                items:
+                  description: |-
+                    PluginConfiguration specifies a plugin that needs to be loaded for this
+                    cluster to be reconciled
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is true if this plugin will be used
+                      type: boolean
+                    isWALArchiver:
+                      default: false
+                      description: |-
+                        Only one plugin can be declared as WALArchiver.
+                        Cannot be active if ".spec.backup.barmanObjectStore" configuration is present.
+                      type: boolean
+                    name:
+                      description: Name is the plugin name
+                      type: string
+                    parameters:
+                      additionalProperties:
+                        type: string
+                      description: Parameters is the configuration of the plugin
+                      type: object
+                  required:
+                  - name
+                  type: object
+                type: array
+              postgresGID:
+                default: 26
+                description: The GID of the `postgres` user inside the image, defaults
+                  to `26`
+                format: int64
+                type: integer
+              postgresUID:
+                default: 26
+                description: The UID of the `postgres` user inside the image, defaults
+                  to `26`
+                format: int64
+                type: integer
+              postgresql:
+                description: Configuration of the PostgreSQL server
+                properties:
+                  enableAlterSystem:
+                    description: |-
+                      If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+                      on this CloudNativePG Cluster.
+                      This should only be used for debugging and troubleshooting.
+                      Defaults to false.
+                    type: boolean
+                  ldap:
+                    description: Options to specify LDAP configuration
+                    properties:
+                      bindAsAuth:
+                        description: Bind as authentication configuration
+                        properties:
+                          prefix:
+                            description: Prefix for the bind authentication option
+                            type: string
+                          suffix:
+                            description: Suffix for the bind authentication option
+                            type: string
+                        type: object
+                      bindSearchAuth:
+                        description: Bind+Search authentication configuration
+                        properties:
+                          baseDN:
+                            description: Root DN to begin the user search
+                            type: string
+                          bindDN:
+                            description: DN of the user to bind to the directory
+                            type: string
+                          bindPassword:
+                            description: Secret with the password for the user to
+                              bind to the directory
+                            properties:
+                              key:
+                                description: The key of the secret to select from. Must
+                                  be a valid secret key.
+                                type: string
+                              name:
+                                default: ""
+                                description: |-
+                                  Name of the referent.
+                                  This field is effectively required, but due to backwards compatibility is
+                                  allowed to be empty. Instances of this type with an empty value here are
+                                  almost certainly wrong.
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                type: string
+                              optional:
+                                description: Specify whether the Secret or its key
+                                  must be defined
+                                type: boolean
+                            required:
+                            - key
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          searchAttribute:
+                            description: Attribute to match against the username
+                            type: string
+                          searchFilter:
+                            description: Search filter to use when doing the search+bind
+                              authentication
+                            type: string
+                        type: object
+                      port:
+                        description: LDAP server port
+                        type: integer
+                      scheme:
+                        description: LDAP scheme to be used; possible options are
+                          `ldap` and `ldaps`
+                        enum:
+                        - ldap
+                        - ldaps
+                        type: string
+                      server:
+                        description: LDAP hostname or IP address
+                        type: string
+                      tls:
+                        description: Set to 'true' to enable LDAP over TLS. 'false'
+                          is default
+                        type: boolean
+                    type: object
+                  parameters:
+                    additionalProperties:
+                      type: string
+                    description: PostgreSQL configuration options (postgresql.conf)
+                    type: object
+                  pg_hba:
+                    description: |-
+                      PostgreSQL Host Based Authentication rules (lines to be appended
+                      to the pg_hba.conf file)
+                    items:
+                      type: string
+                    type: array
+                  pg_ident:
+                    description: |-
+                      PostgreSQL User Name Maps rules (lines to be appended
+                      to the pg_ident.conf file)
+                    items:
+                      type: string
+                    type: array
+                  promotionTimeout:
+                    description: |-
+                      Specifies the maximum number of seconds to wait when promoting an instance to primary.
+                      Default value is 40000000, greater than one year in seconds,
+                      big enough to simulate an infinite timeout
+                    format: int32
+                    type: integer
+                  shared_preload_libraries:
+                    description: List of shared preload libraries to add to the default
+                      ones
+                    items:
+                      type: string
+                    type: array
+                  syncReplicaElectionConstraint:
+                    description: |-
+                      Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+                      set up.
+                    properties:
+                      enabled:
+                        description: This flag enables the constraints for sync replicas
+                        type: boolean
+                      nodeLabelsAntiAffinity:
+                        description: A list of node label values to extract and compare
+                          to evaluate if the pods reside in the same topology or not
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - enabled
+                    type: object
+                  synchronous:
+                    description: Configuration of the PostgreSQL synchronous replication
+                      feature
+                    properties:
+                      dataDurability:
+                        description: |-
+                          If set to "required", data durability is strictly enforced. Write operations
+                          with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+                          block if there are insufficient healthy replicas, ensuring data persistence.
+                          If set to "preferred", data durability is maintained when healthy replicas
+                          are available, but the required number of instances will adjust dynamically
+                          if replicas become unavailable. This setting relaxes strict durability enforcement
+                          to allow for operational continuity. This setting is only applicable if both
+                          `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+                        enum:
+                        - required
+                        - preferred
+                        type: string
+                      maxStandbyNamesFromCluster:
+                        description: |-
+                          Specifies the maximum number of local cluster pods that can be
+                          automatically included in the `synchronous_standby_names` option in
+                          PostgreSQL.
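+              # Illustrative only: a typical `postgresql` section combining server
+              # parameters with an appended pg_hba rule (values are examples, not
+              # defaults).
+              #
+              #   postgresql:
+              #     parameters:
+              #       max_connections: "200"
+              #       shared_buffers: 256MB
+              #     pg_hba:
+              #       - hostssl app app 10.0.0.0/8 scram-sha-256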
+                        type: integer
+                      method:
+                        description: |-
+                          Method to select synchronous replication standbys from the listed
+                          servers, accepting 'any' (quorum-based synchronous replication) or
+                          'first' (priority-based synchronous replication) as values.
+                        enum:
+                        - any
+                        - first
+                        type: string
+                      number:
+                        description: |-
+                          Specifies the number of synchronous standby servers that
+                          transactions must wait for responses from.
+                        type: integer
+                        x-kubernetes-validations:
+                        - message: The number of synchronous replicas should be greater
+                            than zero
+                          rule: self > 0
+                      standbyNamesPost:
+                        description: |-
+                          A user-defined list of application names to be added to
+                          `synchronous_standby_names` after local cluster pods (the order is
+                          only useful for priority-based synchronous replication).
+                        items:
+                          type: string
+                        type: array
+                      standbyNamesPre:
+                        description: |-
+                          A user-defined list of application names to be added to
+                          `synchronous_standby_names` before local cluster pods (the order is
+                          only useful for priority-based synchronous replication).
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - method
+                    - number
+                    type: object
+                    x-kubernetes-validations:
+                    - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre'
+                        and empty 'standbyNamesPost'
+                      rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre)
+                        || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost)
+                        || self.standbyNamesPost.size()==0))
+                type: object
+              primaryUpdateMethod:
+                default: restart
+                description: |-
+                  Method to follow to upgrade the primary server during a rolling
+                  update procedure, after all replicas have been successfully updated:
+                  it can be with a switchover (`switchover`) or in-place (`restart` - default)
+                enum:
+                - switchover
+                - restart
+                type: string
+              primaryUpdateStrategy:
+                default: unsupervised
+                description: |-
+                  Deployment strategy to follow to upgrade the primary server during a rolling
+                  update procedure, after all replicas have been successfully updated:
+                  it can be automated (`unsupervised` - default) or manual (`supervised`)
+                enum:
+                - unsupervised
+                - supervised
+                type: string
+              priorityClassName:
+                description: |-
+                  Name of the priority class which will be used in every generated Pod.
+                  If the specified PriorityClass does not exist, the pod will not be
+                  scheduled. Please refer to
+                  https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+                  for more information
+                type: string
+              probes:
+                description: |-
+                  The configuration of the probes to be injected
+                  in the PostgreSQL Pods.
+                properties:
+                  liveness:
+                    description: The liveness probe configuration
+                    properties:
+                      failureThreshold:
+                        description: |-
+                          Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                          Defaults to 3. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      initialDelaySeconds:
+                        description: |-
+                          Number of seconds after the container has started before liveness probes are initiated.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                      periodSeconds:
+                        description: |-
+                          How often (in seconds) to perform the probe.
+                          Default to 10 seconds. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      successThreshold:
+                        description: |-
+                          Minimum consecutive successes for the probe to be considered successful after having failed.
+                          Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
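+              # Illustrative only: quorum-based synchronous replication that waits
+              # for one standby and relaxes durability when replicas are unavailable.
+              #
+              #   postgresql:
+              #     synchronous:
+              #       method: any
+              #       number: 1
+              #       dataDurability: preferred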
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                      type:
+                        description: The probe strategy
+                        enum:
+                        - pg_isready
+                        - streaming
+                        - query
+                        type: string
+                    type: object
+                  startup:
+                    description: The startup probe configuration
+                    properties:
+                      failureThreshold:
+                        description: |-
+                          Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                          Defaults to 3. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      initialDelaySeconds:
+                        description: |-
+                          Number of seconds after the container has started before liveness probes are initiated.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                      maximumLag:
+                        anyOf:
+                        - type: integer
+                        - type: string
+                        description: Lag limit. Used only for `streaming` strategy
+                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                        x-kubernetes-int-or-string: true
+                      periodSeconds:
+                        description: |-
+                          How often (in seconds) to perform the probe.
+                          Default to 10 seconds. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      successThreshold:
+                        description: |-
+                          Minimum consecutive successes for the probe to be considered successful after having failed.
+                          Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      terminationGracePeriodSeconds:
+                        description: |-
+                          Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                          The grace period is the duration in seconds after the processes running in the pod are sent
+                          a termination signal and the time when the processes are forcibly halted with a kill signal.
+                          Set this value longer than the expected cleanup time for your process.
+                          If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                          value overrides the value provided by the pod spec.
+                          Value must be non-negative integer. The value zero indicates stop immediately via
+                          the kill signal (no opportunity to shut down).
+                          This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                          Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                        format: int64
+                        type: integer
+                      timeoutSeconds:
+                        description: |-
+                          Number of seconds after which the probe times out.
+                          Defaults to 1 second. Minimum value is 1.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                      type:
+                        description: The probe strategy
+                        enum:
+                        - pg_isready
+                        - streaming
+                        - query
+                        type: string
+                    type: object
+                type: object
+              projectedVolumeTemplate:
+                description: |-
+                  Template to be used to define projected volumes; projected volumes will be
+                  mounted under the `/projected` base folder.
+                properties:
+                  defaultMode:
+                    description: |-
+                      defaultMode are the mode bits used to set permissions on created files by default.
+                      Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                      YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                      Directories within the path are not affected by this setting.
+                      This might be in conflict with other options that affect the file
+                      mode, like fsGroup, and the result can be other mode bits set.
+                    format: int32
+                    type: integer
+                  sources:
+                    description: |-
+                      sources is the list of volume projections. Each entry in this list
+                      handles one source.
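+              # Illustrative only: a readiness probe using the `streaming` strategy,
+              # marking an instance ready only while its replication lag stays below
+              # a chosen threshold (the value is an example).
+              #
+              #   probes:
+              #     readiness:
+              #       type: streaming
+              #       maximumLag: 16Mi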
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
+                              type: string
+                            expirationSeconds:
+                              description: |-
+                                expirationSeconds is the requested duration of validity of the service
+                                account token. As the token approaches expiration, the kubelet volume
+                                plugin will proactively rotate the service account token. The kubelet will
+                                start trying to rotate the token if the token is older than 80 percent of
+                                its time to live or if the token is older than 24 hours. Defaults to 1 hour
+                                and must be at least 10 minutes.
+                              format: int64
+                              type: integer
+                            path:
+                              description: |-
+                                path is the path relative to the mount point of the file to project the
+                                token into.
+                              type: string
+                          required:
+                          - path
+                          type: object
+                      type: object
+                    type: array
+                    x-kubernetes-list-type: atomic
+                type: object
+              replica:
+                description: Replica cluster configuration
+                properties:
+                  enabled:
+                    description: |-
+                      If replica mode is enabled, this cluster will be a replica of an
+                      existing cluster. A replica cluster can be created from a recovery
+                      object store or via streaming through pg_basebackup.
+                      Refer to the Replica clusters page of the documentation for more information.
+                    type: boolean
+                  minApplyDelay:
+                    description: |-
+                      When replica mode is enabled, this parameter allows you to replay
+                      transactions only when the system time is at least the configured
+                      time past the commit time. This provides an opportunity to correct
+                      data loss errors. Note that when this parameter is set, a promotion
+                      token cannot be used.
+                    type: string
+                  primary:
+                    description: |-
+                      Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+                      topology specified in externalClusters
+                    type: string
+                  promotionToken:
+                    description: |-
+                      A demotion token generated by an external cluster used to
+                      check if the promotion requirements are met.
+                    type: string
+                  self:
+                    description: |-
+                      Self defines the name of this cluster. It is used to determine if this is a primary
+                      or a replica cluster, comparing it with `primary`
+                    type: string
+                  source:
+                    description: The name of the external cluster which is the replication
+                      origin
+                    minLength: 1
+                    type: string
+                required:
+                - source
+                type: object
+              replicationSlots:
+                default:
+                  highAvailability:
+                    enabled: true
+                description: Replication slots management configuration
+                properties:
+                  highAvailability:
+                    default:
+                      enabled: true
+                    description: Replication slots for high availability configuration
+                    properties:
+                      enabled:
+                        default: true
+                        description: |-
+                          If enabled (default), the operator will automatically manage replication slots
+                          on the primary instance and use them in streaming replication
+                          connections with all the standby instances that are part of the HA
+                          cluster. If disabled, the operator will not take advantage
+                          of replication slots in streaming connections with the replicas.
+                          This feature also controls replication slots in replica clusters,
+                          from the designated primary to its cascading replicas.
+                        type: boolean
+                      slotPrefix:
+                        default: _cnpg_
+                        description: |-
+                          Prefix for replication slots managed by the operator for HA.
+                          It may only contain lower case letters, numbers, and the underscore character.
+                          This can only be set at creation time. By default set to `_cnpg_`.
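+              # Illustrative only: a distributed topology in which this cluster
+              # (cluster-b) replicates from cluster-a, which must be defined in
+              # externalClusters; all names are hypothetical.
+              #
+              #   replica:
+              #     self: cluster-b
+              #     primary: cluster-a
+              #     source: cluster-a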
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
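+              # Illustrative only: guaranteed-QoS style resource settings for the
+              # instance pods (requests equal to limits; values are examples).
+              #
+              #   resources:
+              #     requests:
+              #       cpu: "2"
+              #       memory: 4Gi
+              #     limits:
+              #       cpu: "2"
+              #       memory: 4Gi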
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
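+              # Illustrative only: with the default values below, the smart shutdown
+              # phase gets `smartShutdownTimeout` (180s), leaving
+              # stopDelay - smartShutdownTimeout = 1620s for the fast shutdown phase.
+              #
+              #   stopDelay: 1800
+              #   smartShutdownTimeout: 180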
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined, a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shut down during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group.
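+ # A minimal, hand-written sketch (not controller-gen output) of the cluster-level
+ # `storage` stanza documented in this spec; the storage class name and sizes are
+ # placeholders:
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Cluster
+ #   metadata:
+ #     name: cluster-example
+ #   spec:
+ #     instances: 3
+ #     storage:
+ #       size: 10Gi               # may be increased later, but never decreased
+ #       storageClass: standard   # placeholder; omit to use the default storage class
+ #       resizeInUseVolumes: true # default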
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs.
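+ # A hand-written sketch of the declarative `tablespaces` stanza described above;
+ # tablespace names, the owner, and sizes are placeholders:
+ #
+ #   spec:
+ #     tablespaces:
+ #       - name: analytics
+ #         owner:
+ #           name: app
+ #         storage:
+ #           size: 5Gi
+ #       - name: scratch
+ #         temporary: true        # registered in the `temp_tablespaces` GUC
+ #         storage:
+ #           size: 1Gi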
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
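+ # A hand-written sketch of a topology spread constraint for the instances of a
+ # cluster, following the semantics described above; it assumes the operator's
+ # `cnpg.io/cluster` pod label and uses a placeholder cluster name:
+ #
+ #   spec:
+ #     topologySpreadConstraints:
+ #       - maxSkew: 1
+ #         topologyKey: topology.kubernetes.io/zone
+ #         whenUnsatisfiable: DoNotSchedule
+ #         labelSelector:
+ #           matchLabels:
+ #             cnpg.io/cluster: cluster-example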
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
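+ # A hand-written sketch of the `walStorage` stanza this section describes,
+ # keeping WAL on a separate volume from PGDATA; class names and sizes are
+ # placeholders:
+ #
+ #   spec:
+ #     storage:
+ #       size: 20Gi
+ #     walStorage:
+ #       size: 5Gi
+ #       storageClass: fast-ssd   # placeholder class name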
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate.
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; it can be omitted + if ReplicationTLSSecret is provided.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs; it can be omitted + if ServerTLSSecret is provided.
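+ # A hand-written sketch of the secret layout the two CA descriptions above
+ # imply (`ca.crt` always; `ca.key` only when the operator must issue
+ # certificates itself); the name and data values are placeholders:
+ #
+ #   apiVersion: v1
+ #   kind: Secret
+ #   metadata:
+ #     name: cluster-example-ca
+ #   data:
+ #     ca.crt: <base64-encoded CA certificate>
+ #     ca.key: <base64-encoded CA private key>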
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must also provide `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash of the operator build that is currently running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod. + + Deprecated: the field is not set for backup plugins. + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: |- + The first recoverability point, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. + type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: |- + Last failed backup, stored as a date in RFC3339 format. + + Deprecated: the field is not set for backup plugins. + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format. + This field is calculated from the content of LastSuccessfulBackupByMethod. 
+ + Deprecated: the field is not set for backup plugins. + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: |- + Last successful backup, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. + properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contains the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array +
poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. 
+ type: boolean + type: object + systemID: + description: SystemID is the latest detected PostgreSQL SystemID + type: string + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance; this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. 
+ enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. 
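+ # A hand-written sketch of a Database object using the fields described above;
+ # cluster, database, and role names are placeholders:
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Database
+ #   metadata:
+ #     name: app-database
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     name: app          # immutable; `postgres`, `template0` and `template1` are reserved
+ #     owner: app
+ #     encoding: UTF8     # immutable, maps to `CREATE DATABASE ... ENCODING`
+ #     extensions:
+ #       - name: pg_stat_statements
+ #         ensure: present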
+ type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              applied:
+                description: Applied is true if the database was reconciled correctly
+                type: boolean
+              extensions:
+                description: Extensions is the status of the managed extensions
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+              message:
+                description: Message is the reconciliation output message
+                type: string
+              observedGeneration:
+                description: |-
+                  A sequence number representing the latest
+                  desired state that was synchronized
+                format: int64
+                type: integer
+              schemas:
+                description: Schemas is the status of the managed schemas
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+  name: imagecatalogs.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: ImageCatalog
+    listKind: ImageCatalogList
+    plural: imagecatalogs
+    singular: imagecatalog
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: ImageCatalog is the Schema for the imagecatalogs API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: |-
+              Specification of the desired behavior of the ImageCatalog.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              images:
+                description: List of CatalogImages available in the catalog
+                items:
+                  description: CatalogImage defines the image and major version
+                  properties:
+                    image:
+                      description: The image reference
+                      type: string
+                    major:
+                      description: The PostgreSQL major version of the image. Must
+                        be unique within the catalog.
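+# Illustrative sketch (not part of the generated CRD): an ImageCatalog with
+# one entry per PostgreSQL major version; `major` must be unique across the
+# entries. The image references are assumptions for the sake of the example.
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: ImageCatalog
+#   metadata:
+#     name: postgresql
+#   spec:
+#     images:
+#       - major: 16
+#         image: ghcr.io/cloudnative-pg/postgresql:16.4
+#       - major: 17
+#         image: ghcr.io/cloudnative-pg/postgresql:17.0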
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
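+# Illustrative sketch (not part of the generated CRD): enabling the
+# PodMonitor on a Pooler and dropping a metric family before ingestion;
+# the metric name pattern is an assumption for the sake of the example.
+#
+#   spec:
+#     monitoring:
+#       enablePodMonitor: true
+#       podMonitorMetricRelabelings:
+#         - action: drop
+#           sourceLabels: ["__name__"]
+#           regex: "pgbouncer_stats_.*"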
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
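+# Illustrative sketch (not part of the generated CRD): a minimal Pooler in
+# transaction mode in front of an assumed Cluster named `cluster-example`.
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: Pooler
+#   metadata:
+#     name: pooler-rw
+#   spec:
+#     cluster:
+#       name: cluster-example
+#     instances: 2
+#     type: rw
+#     pgbouncer:
+#       poolMode: transaction
+#       parameters:
+#         max_client_conn: "1000"
+#         default_pool_size: "10"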
+                    type: boolean
+                  pg_hba:
+                    description: |-
+                      PostgreSQL Host Based Authentication rules (lines to be appended
+                      to the pg_hba.conf file)
+                    items:
+                      type: string
+                    type: array
+                  poolMode:
+                    default: session
+                    description: 'The pool mode. Default: `session`.'
+                    enum:
+                    - session
+                    - transaction
+                    type: string
+                type: object
+              serviceTemplate:
+                description: Template for the Service to be created
+                properties:
+                  metadata:
+                    description: |-
+                      Standard object's metadata.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Annotations is an unstructured key value map stored with a resource that may be
+                          set by external tools to store and retrieve arbitrary metadata. They are not
+                          queryable and should be preserved when modifying objects.
+                          More info: http://kubernetes.io/docs/user-guide/annotations
+                        type: object
+                      labels:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Map of string keys and values that can be used to organize and categorize
+                          (scope and select) objects. May match selectors of replication controllers
+                          and services.
+                          More info: http://kubernetes.io/docs/user-guide/labels
+                        type: object
+                      name:
+                        description: The name of the resource. Only supported for
+                          certain types
+                        type: string
+                    type: object
+                  spec:
+                    description: |-
+                      Specification of the desired behavior of the service.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                    properties:
+                      allocateLoadBalancerNodePorts:
+                        description: |-
+                          allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                          allocated for services with type LoadBalancer. Default is "true". It
+                          may be set to "false" if the cluster load-balancer does not rely on
+                          NodePorts. If the caller requests specific NodePorts (by specifying a
+                          value), those requests will be respected, regardless of this field.
+                          This field may only be set for services with type LoadBalancer and will
+                          be cleared if the type is changed to any other type.
+                        type: boolean
+                      clusterIP:
+                        description: |-
+                          clusterIP is the IP address of the service and is usually assigned
+                          randomly. If an address is specified manually, is in-range (as per
+                          system configuration), and is not in use, it will be allocated to the
+                          service; otherwise creation of the service will fail. This field may not
+                          be changed through updates unless the type field is also being changed
+                          to ExternalName (which requires this field to be blank) or the type
+                          field is being changed from ExternalName (in which case this field may
+                          optionally be specified, as described above). Valid values are "None",
+                          empty string (""), or a valid IP address. Setting this to "None" makes a
+                          "headless service" (no virtual IP), which is useful when direct endpoint
+                          connections are preferred and proxying is not required. Only applies to
+                          types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                          when creating a Service of type ExternalName, creation will fail. This
+                          field will be wiped when updating a Service to type ExternalName.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        type: string
+                      clusterIPs:
+                        description: |-
+                          ClusterIPs is a list of IP addresses assigned to this service, and are
+                          usually assigned randomly. If an address is specified manually, is
+                          in-range (as per system configuration), and is not in use, it will be
+                          allocated to the service; otherwise creation of the service will fail.
+                          This field may not be changed through updates unless the type field is
+                          also being changed to ExternalName (which requires this field to be
+                          empty) or the type field is being changed from ExternalName (in which
+                          case this field may optionally be specified, as described above). Valid
+                          values are "None", empty string (""), or a valid IP address. Setting
+                          this to "None" makes a "headless service" (no virtual IP), which is
+                          useful when direct endpoint connections are preferred and proxying is
+                          not required. Only applies to types ClusterIP, NodePort, and
+                          LoadBalancer. If this field is specified when creating a Service of type
+                          ExternalName, creation will fail. This field will be wiped when updating
+                          a Service to type ExternalName. If this field is not specified, it will
+                          be initialized from the clusterIP field. If this field is specified,
+                          clients must ensure that clusterIPs[0] and clusterIP have the same
+                          value.
+
+                          This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                          These IPs must correspond to the values of the ipFamilies field. Both
+                          clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      externalIPs:
+                        description: |-
+                          externalIPs is a list of IP addresses for which nodes in the cluster
+                          will also accept traffic for this service. These IPs are not managed by
+                          Kubernetes. The user is responsible for ensuring that traffic arrives
+                          at a node with this IP. A common example is external load-balancers
+                          that are not part of the Kubernetes system.
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      externalName:
+                        description: |-
+                          externalName is the external reference that discovery mechanisms will
+                          return as an alias for this service (e.g. a DNS CNAME record). No
+                          proxying will be involved. Must be a lowercase RFC-1123 hostname
+                          (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+                        type: string
+                      externalTrafficPolicy:
+                        description: |-
+                          externalTrafficPolicy describes how nodes distribute service traffic they
+                          receive on one of the Service's "externally-facing" addresses (NodePorts,
+                          ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+                          the service in a way that assumes that external load balancers will take care
+                          of balancing the service traffic between nodes, and so each node will deliver
+                          traffic only to the node-local endpoints of the service, without masquerading
+                          the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+                          be dropped.) The default value, "Cluster", uses the standard behavior of
+                          routing to all endpoints evenly (possibly modified by topology and other
+                          features). Note that traffic sent to an External IP or LoadBalancer IP from
+                          within the cluster will always get "Cluster" semantics, but clients sending to
+                          a NodePort from within the cluster may need to take traffic policy into account
+                          when picking a node.
+                        type: string
+                      healthCheckNodePort:
+                        description: |-
+                          healthCheckNodePort specifies the healthcheck nodePort for the service.
+                          This only applies when type is set to LoadBalancer and
+                          externalTrafficPolicy is set to Local.
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+                          implementation is watching for Services with a matching class. Any default load balancer
+                          implementation (e.g. cloud providers) should ignore Services that set this field.
+                          This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                          Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                        type: string
+                      loadBalancerIP:
+                        description: |-
+                          Only applies to Service Type: LoadBalancer.
+                          This feature depends on whether the underlying cloud-provider supports specifying
+                          the loadBalancerIP when a load balancer is created.
+                          This field will be ignored if the cloud-provider does not support the feature.
+                          Deprecated: This field was under-specified and its meaning varies across implementations.
+                          Using it is non-portable and it may not support dual-stack.
+                          Users are encouraged to use implementation-specific annotations when available.
+                        type: string
+                      loadBalancerSourceRanges:
+                        description: |-
+                          If specified and supported by the platform, traffic through the cloud-provider
+                          load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                          cloud-provider does not support the feature.
+                          More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      ports:
+                        description: |-
+                          The list of ports that are exposed by this service.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        items:
+                          description: ServicePort contains information on service's
+                            port.
+                          properties:
+                            appProtocol:
+                              description: |-
+                                The application protocol for this port.
+                                This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                                This field follows standard Kubernetes label syntax.
+                                Valid values are either:
+
+                                * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                                RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                                * Kubernetes-defined prefixed names:
+                                  * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                                  * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                                  * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                                * Other protocols should use implementation-defined prefixed names such as
+                                mycompany.com/my-custom-protocol.
+                              type: string
+                            name:
+                              description: |-
+                                The name of this port within the service. This must be a DNS_LABEL.
+                                All ports within a ServiceSpec must have unique names. When considering
+                                the endpoints for a Service, this must match the 'name' field in the
+                                EndpointPort.
+                                Optional if only one ServicePort is defined on this service.
+                              type: string
+                            nodePort:
+                              description: |-
+                                The port on each node on which this service is exposed when type is
+                                NodePort or LoadBalancer. Usually assigned by the system. If a value is
+                                specified, in-range, and not in use it will be used, otherwise the
+                                operation will fail. If not specified, a port will be allocated if this
+                                Service requires one. If this field is specified when creating a
+                                Service which does not need it, creation will fail. This field will be
+                                wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. 
If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
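+# Illustrative sketch (not part of the generated CRD): a soft pod-affinity
+# preference in a Pooler pod template, favoring nodes that already run the
+# cluster's instances; the `cnpg.io/cluster` label value is an assumption.
+#
+#   template:
+#     spec:
+#       affinity:
+#         podAffinity:
+#           preferredDuringSchedulingIgnoredDuringExecution:
+#             - weight: 50
+#               podAffinityTerm:
+#                 topologyKey: kubernetes.io/hostname
+#                 labelSelector:
+#                   matchLabels:
+#                     cnpg.io/cluster: cluster-example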
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
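+# Illustrative sketch (not part of the generated CRD): a soft anti-affinity
+# that spreads pooler replicas across nodes; the `cnpg.io/poolerName` label
+# is an assumption for the sake of the example.
+#
+#   affinity:
+#     podAntiAffinity:
+#       preferredDuringSchedulingIgnoredDuringExecution:
+#         - weight: 100
+#           podAffinityTerm:
+#             topologyKey: kubernetes.io/hostname
+#             labelSelector:
+#               matchLabels:
+#                 cnpg.io/poolerName: pooler-rw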
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
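+          # Illustrative example, not part of the generated schema: a hard
+          # anti-affinity rule under the required field described above,
+          # spreading matching pods across zones (the label is hypothetical).
+          #
+          #   affinity:
+          #     podAntiAffinity:
+          #       requiredDuringSchedulingIgnoredDuringExecution:
+          #       - topologyKey: topology.kubernetes.io/zone
+          #         labelSelector:
+          #           matchLabels:
+          #             app: pgbouncer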
+                items:
+                  description: |-
+                    Defines a set of pods (namely those matching the labelSelector
+                    relative to the given namespace(s)) that this pod should be
+                    co-located (affinity) or not co-located (anti-affinity) with,
+                    where co-located is defined as running on a node whose value of
+                    the label with key <topologyKey> matches that of any node on which
+                    a pod of the set of pods is running
+                  properties:
+                    labelSelector:
+                      description: |-
+                        A label query over a set of resources, in this case pods.
+                        If it's null, this PodAffinityTerm matches with no Pods.
+                      properties:
+                        matchExpressions:
+                          description: matchExpressions is a list of label selector
+                            requirements. The requirements are ANDed.
+                          items:
+                            description: |-
+                              A label selector requirement is a selector that contains values, a key, and an operator that
+                              relates the key and values.
+                            properties:
+                              key:
+                                description: key is the label key that the selector
+                                  applies to.
+                                type: string
+                              operator:
+                                description: |-
+                                  operator represents a key's relationship to a set of values.
+                                  Valid operators are In, NotIn, Exists and DoesNotExist.
+                                type: string
+                              values:
+                                description: |-
+                                  values is an array of string values. If the operator is In or NotIn,
+                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                  the values array must be empty. This array is replaced during a strategic
+                                  merge patch.
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            required:
+                            - key
+                            type: object
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        matchLabels:
+                          additionalProperties:
+                            type: string
+                          description: |-
+                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                            map is equivalent to an element of matchExpressions, whose key field is "key", the
+                            operator is "In", and the values array contains only "value". The requirements are ANDed.
+                          type: object
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    matchLabelKeys:
+                      description: |-
+                        MatchLabelKeys is a set of pod label keys to select which pods will
+                        be taken into consideration. The keys are used to lookup values from the
+                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                        to select the group of existing pods which pods will be taken into consideration
+                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                        pod labels will be ignored. The default value is empty.
+                        The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                        Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                      items:
+                        type: string
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    mismatchLabelKeys:
+                      description: |-
+                        MismatchLabelKeys is a set of pod label keys to select which pods will
+                        be taken into consideration. The keys are used to lookup values from the
+                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                        to select the group of existing pods which pods will be taken into consideration
+                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                        pod labels will be ignored. The default value is empty.
+                        The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+                        Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+                      items:
+                        type: string
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    namespaceSelector:
+                      description: |-
+                        A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
+                Escaped references will never be expanded, regardless
+                of whether the variable exists or not. Cannot be updated.
+                More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+              items:
+                type: string
+              type: array
+              x-kubernetes-list-type: atomic
+            command:
+              description: |-
+                Entrypoint array. Not executed within a shell.
+                The container image's ENTRYPOINT is used if this is not provided.
+                Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                of whether the variable exists or not. Cannot be updated.
+                More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+              items:
+                type: string
+              type: array
+              x-kubernetes-list-type: atomic
+            env:
+              description: |-
+                List of environment variables to set in the container.
+                Cannot be updated.
+              items:
+                description: EnvVar represents an environment variable present in
+                  a Container.
+                properties:
+                  name:
+                    description: Name of the environment variable. Must be a C_IDENTIFIER.
+                    type: string
+                  value:
+                    description: |-
+                      Variable references $(VAR_NAME) are expanded
+                      using the previously defined environment variables in the container and
+                      any service environment variables. If a variable cannot be resolved,
+                      the reference in the input string will be unchanged. Double $$ are reduced
+                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                      "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                      Escaped references will never be expanded, regardless of whether the variable
+                      exists or not.
+                      Defaults to "".
+                    type: string
+                  valueFrom:
+                    description: Source for the environment variable's value. Cannot
+                      be used if value is not empty.
+                    properties:
+                      configMapKeyRef:
+                        description: Selects a key of a ConfigMap.
+                        properties:
+                          key:
+                            description: The key to select.
+                            type: string
+                          name:
+                            default: ""
+                            description: |-
+                              Name of the referent.
+                              This field is effectively required, but due to backwards compatibility is
+                              allowed to be empty. Instances of this type with an empty value here are
+                              almost certainly wrong.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            type: string
+                          optional:
+                            description: Specify whether the ConfigMap or its key
+                              must be defined
+                            type: boolean
+                        required:
+                        - key
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      fieldRef:
+                        description: |-
+                          Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                          spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                        properties:
+                          apiVersion:
+                            description: Version of the schema the FieldPath is written
+                              in terms of, defaults to "v1".
+                            type: string
+                          fieldPath:
+                            description: Path of the field to select in the specified
+                              API version.
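+          # Illustrative example, not part of the generated schema: container
+          # environment variables combining the plain, fieldRef, and
+          # configMapKeyRef sources described above (all names hypothetical).
+          #
+          #   env:
+          #   - name: POD_NAME
+          #     valueFrom:
+          #       fieldRef:
+          #         fieldPath: metadata.name
+          #   - name: LOG_LEVEL
+          #     valueFrom:
+          #       configMapKeyRef:
+          #         name: app-config
+          #         key: log-level
+          #         optional: true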
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
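+          # Illustrative example, not part of the generated schema: a postStart
+          # hook using the exec handler described above (the command is
+          # hypothetical; remember that further management of the container
+          # blocks until the hook completes).
+          #
+          #   lifecycle:
+          #     postStart:
+          #       exec:
+          #         command: ["/bin/sh", "-c", "echo started > /tmp/started"]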
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
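+          # Illustrative example, not part of the generated schema: a preStop
+          # hook pausing shutdown briefly, e.g. to let endpoints drain; the
+          # sleep handler described above is only honored on Kubernetes
+          # versions where PodLifecycleSleepAction is available.
+          #
+          #   lifecycle:
+          #     preStop:
+          #       sleep:
+          #         seconds: 5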
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. 
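+          # Illustrative example, not part of the generated schema: a liveness
+          # probe built from the httpGet fields described above (path and port
+          # are hypothetical).
+          #
+          #   livenessProbe:
+          #     httpGet:
+          #       path: /healthz
+          #       port: 8080
+          #     initialDelaySeconds: 5
+          #     periodSeconds: 10
+          #     failureThreshold: 3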
+                      type: string
+                    port:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      description: |-
+                        Name or number of the port to access on the container.
+                        Number must be in the range 1 to 65535.
+                        Name must be an IANA_SVC_NAME.
+                      x-kubernetes-int-or-string: true
+                    scheme:
+                      description: |-
+                        Scheme to use for connecting to the host.
+                        Defaults to HTTP.
+                      type: string
+                  required:
+                  - port
+                  type: object
+                initialDelaySeconds:
+                  description: |-
+                    Number of seconds after the container has started before liveness probes are initiated.
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  type: integer
+                periodSeconds:
+                  description: |-
+                    How often (in seconds) to perform the probe.
+                    Defaults to 10 seconds. Minimum value is 1.
+                  format: int32
+                  type: integer
+                successThreshold:
+                  description: |-
+                    Minimum consecutive successes for the probe to be considered successful after having failed.
+                    Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                  format: int32
+                  type: integer
+                tcpSocket:
+                  description: TCPSocket specifies a connection to a TCP port.
+                  properties:
+                    host:
+                      description: 'Optional: Host name to connect to, defaults
+                        to the pod IP.'
+                      type: string
+                    port:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      description: |-
+                        Number or name of the port to access on the container.
+                        Number must be in the range 1 to 65535.
+                        Name must be an IANA_SVC_NAME.
+                      x-kubernetes-int-or-string: true
+                  required:
+                  - port
+                  type: object
+                terminationGracePeriodSeconds:
+                  description: |-
+                    Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                    The grace period is the duration in seconds after the processes running in the pod are sent
+                    a termination signal and the time when the processes are forcibly halted with a kill signal.
+                    Set this value longer than the expected cleanup time for your process.
+                    If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                    value overrides the value provided by the pod spec.
+                    Value must be a non-negative integer. The value zero indicates stop immediately via
+                    the kill signal (no opportunity to shut down).
+                    This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                    Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                  format: int64
+                  type: integer
+                timeoutSeconds:
+                  description: |-
+                    Number of seconds after which the probe times out.
+                    Defaults to 1 second. Minimum value is 1.
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  type: integer
+              type: object
+            name:
+              description: |-
+                Name of the container specified as a DNS_LABEL.
+                Each container in a pod must have a unique name (DNS_LABEL).
+                Cannot be updated.
+              type: string
+            ports:
+              description: |-
+                List of ports to expose from the container. Not specifying a port here
+                DOES NOT prevent that port from being exposed. Any port which is
+                listening on the default "0.0.0.0" address inside a container will be
+                accessible from the network.
+                Modifying this array with strategic merge patch may corrupt the data.
+                For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+                Cannot be updated.
+              items:
+                description: ContainerPort represents a network port in a single
+                  container.
+                properties:
+                  containerPort:
+                    description: |-
+                      Number of port to expose on the pod's IP address.
+                      This must be a valid port number, 0 < x < 65536.
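+          # Illustrative example, not part of the generated schema: a ports
+          # entry matching the ContainerPort fields described here (the port
+          # name and number are hypothetical).
+          #
+          #   ports:
+          #   - name: metrics
+          #     containerPort: 9187
+          #     protocol: TCP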
+ format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
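+          # Illustrative example, not part of the generated schema: a readiness
+          # probe using the tcpSocket handler described just below, pointing at
+          # a named port (the name is hypothetical).
+          #
+          #   readinessProbe:
+          #     tcpSocket:
+          #       port: postgres
+          #     periodSeconds: 10
+          #     successThreshold: 1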
+                      x-kubernetes-int-or-string: true
+                    scheme:
+                      description: |-
+                        Scheme to use for connecting to the host.
+                        Defaults to HTTP.
+                      type: string
+                  required:
+                  - port
+                  type: object
+                initialDelaySeconds:
+                  description: |-
+                    Number of seconds after the container has started before liveness probes are initiated.
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  type: integer
+                periodSeconds:
+                  description: |-
+                    How often (in seconds) to perform the probe.
+                    Defaults to 10 seconds. Minimum value is 1.
+                  format: int32
+                  type: integer
+                successThreshold:
+                  description: |-
+                    Minimum consecutive successes for the probe to be considered successful after having failed.
+                    Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                  format: int32
+                  type: integer
+                tcpSocket:
+                  description: TCPSocket specifies a connection to a TCP port.
+                  properties:
+                    host:
+                      description: 'Optional: Host name to connect to, defaults
+                        to the pod IP.'
+                      type: string
+                    port:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      description: |-
+                        Number or name of the port to access on the container.
+                        Number must be in the range 1 to 65535.
+                        Name must be an IANA_SVC_NAME.
+                      x-kubernetes-int-or-string: true
+                  required:
+                  - port
+                  type: object
+                terminationGracePeriodSeconds:
+                  description: |-
+                    Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                    The grace period is the duration in seconds after the processes running in the pod are sent
+                    a termination signal and the time when the processes are forcibly halted with a kill signal.
+                    Set this value longer than the expected cleanup time for your process.
+                    If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                    value overrides the value provided by the pod spec.
+                    Value must be a non-negative integer. The value zero indicates stop immediately via
+                    the kill signal (no opportunity to shut down).
+                    This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                    Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                  format: int64
+                  type: integer
+                timeoutSeconds:
+                  description: |-
+                    Number of seconds after which the probe times out.
+                    Defaults to 1 second. Minimum value is 1.
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  type: integer
+              type: object
+            resizePolicy:
+              description: Resources resize policy for the container.
+              items:
+                description: ContainerResizePolicy represents resource resize policy
+                  for the container.
+                properties:
+                  resourceName:
+                    description: |-
+                      Name of the resource to which this resource resize policy applies.
+                      Supported values: cpu, memory.
+                    type: string
+                  restartPolicy:
+                    description: |-
+                      Restart policy to apply when specified resource is resized.
+                      If not specified, it defaults to NotRequired.
+                    type: string
+                required:
+                - resourceName
+                - restartPolicy
+                type: object
+              type: array
+              x-kubernetes-list-type: atomic
+            resources:
+              description: |-
+                Compute Resources required by this container.
+                Cannot be updated.
+                More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+              properties:
+                claims:
+                  description: |-
+                    Claims lists the names of resources, defined in spec.resourceClaims,
+                    that are used by this container.
+
+                    This is an alpha field and requires enabling the
+                    DynamicResourceAllocation feature gate.
+
+                    This field is immutable.
It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
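+          # Illustrative example, not part of the generated schema: a hardened
+          # container securityContext combining the fields described above.
+          #
+          #   securityContext:
+          #     runAsNonRoot: true
+          #     allowPrivilegeEscalation: false
+          #     readOnlyRootFilesystem: true
+          #     capabilities:
+          #       drop: ["ALL"]
+          #     seccompProfile:
+          #       type: RuntimeDefault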
+ type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. 
+                    Defaults to 10 seconds. Minimum value is 1.
+                  format: int32
+                  type: integer
+                successThreshold:
+                  description: |-
+                    Minimum consecutive successes for the probe to be considered successful after having failed.
+                    Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                  format: int32
+                  type: integer
+                tcpSocket:
+                  description: TCPSocket specifies a connection to a TCP port.
+                  properties:
+                    host:
+                      description: 'Optional: Host name to connect to, defaults
+                        to the pod IP.'
+                      type: string
+                    port:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      description: |-
+                        Number or name of the port to access on the container.
+                        Number must be in the range 1 to 65535.
+                        Name must be an IANA_SVC_NAME.
+                      x-kubernetes-int-or-string: true
+                  required:
+                  - port
+                  type: object
+                terminationGracePeriodSeconds:
+                  description: |-
+                    Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                    The grace period is the duration in seconds after the processes running in the pod are sent
+                    a termination signal and the time when the processes are forcibly halted with a kill signal.
+                    Set this value longer than the expected cleanup time for your process.
+                    If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                    value overrides the value provided by the pod spec.
+                    Value must be a non-negative integer. The value zero indicates stop immediately via
+                    the kill signal (no opportunity to shut down).
+                    This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                    Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                  format: int64
+                  type: integer
+                timeoutSeconds:
+                  description: |-
+                    Number of seconds after which the probe times out.
+                    Defaults to 1 second. Minimum value is 1.
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  type: integer
+              type: object
+            stdin:
+              description: |-
+                Whether this container should allocate a buffer for stdin in the container runtime. If this
+                is not set, reads from stdin in the container will always result in EOF.
+                Default is false.
+              type: boolean
+            stdinOnce:
+              description: |-
+                Whether the container runtime should close the stdin channel after it has been opened by
+                a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                at which time stdin is closed and remains closed until the container is restarted. If this
+                flag is false, container processes that read from stdin will never receive an EOF.
+                Default is false
+              type: boolean
+            terminationMessagePath:
+              description: |-
+                Optional: Path at which the file to which the container's termination message
+                will be written is mounted into the container's filesystem.
+                Message written is intended to be brief final status, such as an assertion failure message.
+                Will be truncated by the node if greater than 4096 bytes. The total message length across
+                all containers will be limited to 12kb.
+                Defaults to /dev/termination-log.
+                Cannot be updated.
+              type: string
+            terminationMessagePolicy:
+              description: |-
+                Indicate how the termination message should be populated. File will use the contents of
+                terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
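+          # Illustrative example, not part of the generated schema: a volume
+          # mount using the fields described above (the volume name and paths
+          # are hypothetical; the name must match a declared volume).
+          #
+          #   volumeMounts:
+          #   - name: scratch
+          #     mountPath: /var/lib/scratch
+          #     subPath: data
+          #     readOnly: false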
+ type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". 
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before readiness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port.
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before startup probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem.
Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. 
+ type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities while still allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod. + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged.
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
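+# A minimal postStart sketch (comment only), assuming a shell is available in
+# the container image; the command itself is hypothetical. As noted above,
+# other management of the container blocks until the hook completes.
+#
+#   lifecycle:
+#     postStart:
+#       exec:
+#         command: ["/bin/sh", "-c", "echo started > /tmp/started"]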
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
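+# A hedged preStop sketch (comment only): the sleep handler described above is
+# commonly used to delay SIGTERM so in-flight connections can drain; five
+# seconds is an arbitrary illustrative value, and the handler runs inside the
+# Pod's termination grace period.
+#
+#   lifecycle:
+#     preStop:
+#       sleep:
+#         seconds: 5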
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. 
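+# Putting the livenessProbe fields above together (comment only): an HTTP
+# check sketch; the path and port are hypothetical.
+#
+#   livenessProbe:
+#     httpGet:
+#       path: /healthz
+#       port: 8080
+#     failureThreshold: 3
+#     periodSeconds: 10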
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. 
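+# A ports sketch (comment only). As the description above notes, listing a
+# port here is informational and does not control exposure; the name and
+# number are hypothetical.
+#
+#   ports:
+#   - name: metrics
+#     containerPort: 9187
+#     protocol: TCP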
+ format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
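+# A gRPC readinessProbe sketch (comment only), assuming the container serves
+# the standard gRPC health-checking protocol referenced above; the port and
+# service name are hypothetical.
+#
+#   readinessProbe:
+#     grpc:
+#       port: 9090
+#       service: readiness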
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. 
It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
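+# Pulling the preceding container fields together (comment only): a sketch of
+# a "sidecar"-style init container, per the restartPolicy description above.
+# The name, image, and resource figures are hypothetical.
+#
+#   initContainers:
+#   - name: log-shipper
+#     image: example.com/log-shipper:1.0
+#     restartPolicy: Always        # keeps running until regular containers end
+#     resources:
+#       requests: {cpu: 100m, memory: 64Mi}
+#       limits: {memory: 128Mi}
+#     securityContext:
+#       runAsNonRoot: true
+#       allowPrivilegeEscalation: false
+#       capabilities: {drop: ["ALL"]}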
+ type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. 
+ Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. 
+ FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
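+# A volumeMounts sketch (comment only) using the fields just described; the
+# volume names and paths are hypothetical. Note that subPathExpr (not subPath)
+# expands $(VAR_NAME) references, and the two are mutually exclusive.
+#
+#   volumeMounts:
+#   - name: app-config
+#     mountPath: /etc/app
+#     readOnly: true
+#   - name: shared-logs
+#     mountPath: /var/log/app
+#     subPathExpr: $(POD_NAME)     # assumes POD_NAME is defined in env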
+ type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. 
+ + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. 
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxChangePolicy:
+ description: |-
+ seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+ It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+ Valid values are "MountOption" and "Recursive".
+
+ "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+ This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+
+ "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+ This requires all Pods that share the same volume to use the same SELinux label.
+ It is not possible to share the same volume among privileged and unprivileged Pods.
+ Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ CSIDriver instance. Other volumes are always re-labelled recursively.
+ "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+
+ If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+ If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+ and "Recursive" for all other volumes.
+
+ This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+
+ All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
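Taken together, the pod-level security context fields above are typically combined along these lines. This is a minimal sketch; the UIDs, GIDs, and policy choice are illustrative values, not defaults from this schema:

```yaml
# Illustrative sketch: pod-level securityContext using the
# runAs*, fsGroup and fsGroupChangePolicy fields documented above.
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 10001
    runAsGroup: 10001
    fsGroup: 10001
    fsGroupChangePolicy: OnRootMismatch  # skip recursive chown when ownership already matches
```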
+ properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
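A hedged sketch of the seccomp and sysctl fields above: `RuntimeDefault` applies the container runtime's default seccomp profile, and the sysctl shown is one of the namespaced, "safe" sysctls; both values are examples only:

```yaml
# Illustrative sketch: seccomp profile and a namespaced sysctl
# set at pod level (values are examples).
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault
    sysctls:
      - name: net.ipv4.ip_local_port_range
        value: "1024 65535"
```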
+ properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "...svc.". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. 
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ domains. Scheduler will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how to spread
+ matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
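For orientation, the toleration fields above combine like this in a pod template; the taint key and value are hypothetical:

```yaml
# Illustrative sketch: tolerate a hypothetical dedicated-node taint,
# remaining on the node for one hour after the taint appears.
spec:
  tolerations:
    - key: example.com/dedicated   # hypothetical taint key
      operator: Equal
      value: postgres
      effect: NoExecute
      tolerationSeconds: 3600
```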
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. 
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ description: |-
+ List of volumes that can be mounted by containers belonging to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes
+ items:
+ description: Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
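Putting the required fields of a topology spread constraint together, a minimal sketch looks like the following; the `app: example` pod label is hypothetical:

```yaml
# Illustrative sketch: spread matching pods evenly across zones,
# tolerating a skew of at most one pod per zone.
spec:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          app: example   # hypothetical pod label
```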
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. 
+ properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
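As a consolidated sketch of the configMap volume fields above (`items`, `key`, `path`, `defaultMode`), with hypothetical ConfigMap and file names:

```yaml
# Illustrative sketch: project selected ConfigMap keys as files
# with restrictive permissions (names are examples).
spec:
  volumes:
    - name: app-config
      configMap:
        name: example-config
        defaultMode: 0440
        items:
          - key: settings.conf
            path: app/settings.conf
```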
Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of
+ the pod: only annotations, labels, name,
+ namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the ''..'' path.
+ Must be utf-8 encoded. The first item of
+ the relative path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
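A sketch of the downwardAPI volume fields above, showing both a `fieldRef` and a `resourceFieldRef` entry; the volume and container names are hypothetical:

```yaml
# Illustrative sketch: expose pod labels and a container's memory
# limit as files via a downwardAPI volume.
spec:
  volumes:
    - name: podinfo
      downwardAPI:
        items:
          - path: labels
            fieldRef:
              fieldPath: metadata.labels
          - path: mem_limit
            resourceFieldRef:
              containerName: app   # hypothetical container name
              resource: limits.memory
              divisor: 1Mi
```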
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
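A sketch of a generic ephemeral volume built from the `volumeClaimTemplate` described above; the storage class name and size are hypothetical:

```yaml
# Illustrative sketch: per-pod scratch space provisioned from a
# volumeClaimTemplate; the PVC is owned by, and deleted with, the pod.
spec:
  volumes:
    - name: scratch
      ephemeral:
        volumeClaimTemplate:
          spec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: fast-ssd   # hypothetical storage class
            resources:
              requests:
                storage: 10Gi
```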
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
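As a fragment-level sketch of the `dataSourceRef` field above, pre-populating a claim from an existing VolumeSnapshot (snapshot name is hypothetical; requires a snapshot-capable CSI driver):

```yaml
# Illustrative sketch: a claim spec fragment restoring from a
# VolumeSnapshot via dataSourceRef.
dataSourceRef:
  apiGroup: snapshot.storage.k8s.io
  kind: VolumeSnapshot
  name: example-snapshot
```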
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
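A sketch of the `image` volume type above, mounting an OCI artifact read-only into the pod; the registry reference is hypothetical:

```yaml
# Illustrative sketch: mount an OCI artifact (e.g. a data bundle)
# via the image volume type; the mount is read-only and noexec.
spec:
  volumes:
    - name: models
      image:
        reference: registry.example.com/models/embeddings:v1   # hypothetical
        pullPolicy: IfNotPresent
```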
+ More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether to support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether to support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all-in-one resources + (secrets, configmaps, and downward API) + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serviceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. 
+ Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - 
additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. 
+ properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. + type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: Whether the first backup has to start immediately after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: Whether this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup.
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule was checked + format: date-time + type: string + lastScheduleTime: + description: Information about the last time a backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the subscription will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters included in the `WITH` clause of the PostgreSQL + `CREATE SUBSCRIPTION` command.
Most parameters cannot be changed + after the subscription is created and will be ignored if modified + later, except for a limited set documented at: + https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() 
= pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , 
maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files 
are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + 
usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + 
command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.1 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + 
- CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.27.0-rc1.yaml b/releases/cnpg-1.27.0-rc1.yaml new file mode 100644 index 0000000000..73cb75ff2e --- /dev/null +++ b/releases/cnpg-1.27.0-rc1.yaml @@ -0,0 +1,18219 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + x-kubernetes-validations: + - message: BackupSpec is immutable once set + rule: oldSelf == self + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without explicitly + providing the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required by the S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + defaults to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without explicitly + providing the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace.
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a map of key-value pairs used to define the nodes on which
+ the pods can run.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required" could lead to instances remaining pending until new kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
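To ground the schema above: the user-facing knobs are `enablePodAntiAffinity`, `topologyKey`, `podAntiAffinityType`, `nodeSelector`, and `tolerations`; the generated pod (anti-)affinity terms are derived from them. A minimal sketch of a `Cluster` using these fields (all names, labels, and taints are illustrative, not prescriptive):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example            # hypothetical name
spec:
  instances: 3
  affinity:
    enablePodAntiAffinity: true    # operator defines anti-affinity unless set to false
    topologyKey: kubernetes.io/hostname
    podAntiAffinityType: required  # "preferred" is the default; "required" may leave pods Pending
    nodeSelector:
      workload: postgres           # illustrative node label
    tolerations:
    - key: dedicated               # illustrative taint
      operator: Equal
      value: postgres
      effect: NoSchedule
  storage:
    size: 1Gi
```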
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. 
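As a concrete, hedged example of the `azureCredentials` stanza above, here is a fragment of a `Cluster` spec assuming a pre-created Secret named `azure-creds` that holds the storage account name and key (the container URL is hypothetical):

```yaml
spec:
  backup:
    barmanObjectStore:
      destinationPath: https://STORAGEACCOUNT.blob.core.windows.net/backups/   # hypothetical container
      azureCredentials:
        storageAccount:
          name: azure-creds        # hypothetical Secret
          key: AZURE_STORAGE_ACCOUNT
        storageKey:
          name: azure-creds
          key: AZURE_STORAGE_KEY
```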
+ enum:
+ - bzip2
+ - gzip
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder)
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ default to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ providing explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
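The S3 counterpart follows the same Secret-referencing pattern. A sketch under the assumption of a Secret named `aws-creds` (bucket, endpoint, and key names are hypothetical):

```yaml
spec:
  backup:
    barmanObjectStore:
      destinationPath: s3://my-bucket/backups     # hypothetical bucket
      endpointURL: https://s3.example.com         # only for non-default endpoints
      s3Credentials:
        accessKeyId:
          name: aws-creds
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: aws-creds
          key: ACCESS_SECRET_KEY
      data:
        compression: gzip     # one of the enum values above
        jobs: 4               # parallel upload jobs (default 2)
```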
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3, the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2`,
+ `lz4`, `snappy`, `xz`, and `zstd`.
+ enum:
+ - bzip2
+ - gzip
+ - lz4
+ - snappy
+ - xz
+ - zstd
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
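Putting the `wal` tuning fields and `retentionPolicy` together, a hedged fragment of a `Cluster` spec (bucket name is hypothetical):

```yaml
spec:
  backup:
    retentionPolicy: "30d"    # XXu form: 30 days
    barmanObjectStore:
      destinationPath: s3://my-bucket/backups   # hypothetical
      wal:
        compression: zstd     # from the enum above
        maxParallel: 4        # archive/restore up to 4 WAL files at once
```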
+ pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. + type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. 
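For the `volumeSnapshot` backup method described just above, a minimal sketch (the VolumeSnapshotClass name is hypothetical and depends on the CSI driver in use):

```yaml
spec:
  backup:
    volumeSnapshot:
      className: csi-snapclass        # hypothetical VolumeSnapshotClass
      online: true                    # hot backup (default)
      onlineConfiguration:
        immediateCheckpoint: true
        waitForArchive: true
      snapshotOwnerReference: cluster # none | cluster | backup
```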
+ This option requires `localeProvider` to be set to `builtin`.
+ Available from PostgreSQL 17.
+ type: string
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default:`UTF8`)
+ type: string
+ icuLocale:
+ description: |-
+ Specifies the ICU locale when the ICU provider is used.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
+ type: string
+ icuRules:
+ description: |-
+ Specifies additional collation rules to customize the behavior of the default collation.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 16.
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ pgDumpExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ pgRestoreExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
+ locale:
+ description: Sets the default collation order and character
+ classification in the new database.
+ type: string
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default:`C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default:`C`)
+ type: string
+ localeProvider:
+ description: |-
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
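An `initdb` bootstrap combining several of the fields above; note that the `x-kubernetes-validations` rules further below tie `icuLocale`, `icuRules`, and `builtinLocale` to the matching `localeProvider`. Values here are illustrative:

```yaml
spec:
  bootstrap:
    initdb:
      database: app
      owner: app
      dataChecksums: true
      encoding: UTF8
      walSegmentSize: 32       # MB, between 1 and 1024
      localeProvider: icu      # PostgreSQL 16+
      icuLocale: en-US         # valid only with localeProvider: icu
```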
+ Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. 
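The `postInit*SQLRefs` families all share the shape shown above: Secrets are processed first, then ConfigMaps, each in array order. A hedged sketch with hypothetical object names:

```yaml
spec:
  bootstrap:
    initdb:
      database: app
      owner: app
      postInitApplicationSQLRefs:
        secretRefs:               # processed before configMapRefs
        - name: app-seed          # hypothetical Secret
          key: seed.sql
        configMapRefs:
        - name: app-schema        # hypothetical ConfigMap
          key: schema.sql
```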
+ type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. 
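A `recovery` bootstrap referencing an object store through `externalClusters`, with an optional point-in-time target (names and timestamp are hypothetical):

```yaml
spec:
  bootstrap:
    recovery:
      source: origin-cluster      # must match an externalClusters entry
      recoveryTarget:
        targetTime: "2024-01-01T00:00:00Z"   # RFC3339; omit for full recovery
  externalClusters:
  - name: origin-cluster
    barmanObjectStore:
      destinationPath: s3://my-bucket/backups   # hypothetical
      s3Credentials:
        inheritFromIAMRole: true
```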
Currently supporting `VolumeSnapshot`
+ and `PersistentVolumeClaim` resources that map an existing
+ PVC group, compatible with CloudNativePG, and taken with
+ a cold backup copy on a fenced Postgres instance (limitation
+ which will be removed in the future when online backup
+ will be implemented).
+ Mutually exclusive with `backup`.
+ properties:
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ tablespaceStorage:
+ additionalProperties:
+ description: |-
+ TypedLocalObjectReference contains enough information to let you locate the
+ typed referenced object inside the same namespace.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ description: Configuration of the storage for PostgreSQL
+ tablespaces
+ type: object
+ walStorage:
+ description: Configuration of the storage for PostgreSQL
+ WAL (Write-Ahead Log)
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - storage
+ type: object
+ type: object
+ type: object
+ certificates:
+ description: The configuration for the CA and related certificates
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ description:
+ description: Description of this PostgreSQL cluster
+ type: string
+ enablePDB:
+ default: true
+ description: |-
+ Manage the `PodDisruptionBudget` resources within the cluster. When
+ configured as `true` (default setting), the pod disruption budgets
+ will safeguard the primary node from being terminated. Conversely,
+ setting it to `false` will result in the absence of any
+ `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ hosting the PostgreSQL cluster. This latter configuration is
+ advisable for any PostgreSQL cluster employed for
+ development/staging purposes.
+ type: boolean
+ enableSuperuserAccess:
+ default: false
+ description: |-
+ When this option is enabled, the operator will use the `SuperuserSecret`
+ to update the `postgres` user password (if the secret is
+ not present, the operator will automatically create one). When this
+ option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+ it when automatically created, and then blank the password of the `postgres`
+ user by setting it to `NULL`. Disabled by default.
+ type: boolean
+ env:
+ description: |-
+ Env follows the Env format to pass environment variables
+ to the pods created in the cluster
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
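User-provided certificates plug into the four secret references above. A hedged sketch assuming pre-created Secrets (for example, issued by cert-manager; all names are hypothetical):

```yaml
spec:
  certificates:
    serverCASecret: server-ca             # must contain ca.crt (and ca.key if no serverTLSSecret)
    serverTLSSecret: server-tls           # kubernetes.io/tls Secret
    clientCASecret: client-ca
    replicationTLSSecret: replication-tls # client cert for the streaming_replica user
  enableSuperuserAccess: true             # re-creates and maintains the postgres secret
```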
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name of each environment + variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. 
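`env` and `envFrom` mirror the core `EnvVar`/`EnvFromSource` types, so the usual sourcing patterns apply. A hedged sketch (object names are hypothetical, and the operator may reject variables it manages itself):

```yaml
spec:
  env:
  - name: TZ
    value: Europe/Rome
  - name: HTTPS_PROXY              # hypothetical, sourced from a Secret
    valueFrom:
      secretKeyRef:
        name: proxy-config
        key: https-proxy
  envFrom:
  - configMapRef:
      name: common-pg-env          # hypothetical ConfigMap
```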
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to
+ consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
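# Editor's note: a sketch of the azureCredentials block described above,
# pointing at a hypothetical Secret named "azure-creds"; the container URL
# and key names are illustrative only.
---
barmanObjectStore:
  destinationPath: https://STORAGEACCOUNT.blob.core.windows.net/backups/
  azureCredentials:
    storageAccount:
      name: azure-creds
      key: AZURE_STORAGE_ACCOUNT
    storageKey:
      name: azure-creds
      key: AZURE_STORAGE_KEY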
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2`, and `snappy`.
+ enum:
+ - bzip2
+ - gzip
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role-based authentication without
+ explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
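# Editor's note: a sketch of the base-backup "data" section tuned with the
# compression, encryption, checkpoint, and parallel-jobs fields documented
# above; the chosen values are assumptions, not recommendations.
---
data:
  compression: gzip
  encryption: AES256
  immediateCheckpoint: true
  jobs: 4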
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session token
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3, the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2`,
+ `lz4`, `snappy`, `xz`, and `zstd`.
+ enum:
+ - bzip2
+ - gzip
+ - lz4
+ - snappy
+ - xz
+ - zstd
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
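# Editor's note: a sketch of a complete barmanObjectStore using the
# s3Credentials and wal fields above; bucket and Secret names are
# hypothetical placeholders.
---
barmanObjectStore:
  destinationPath: s3://my-backup-bucket/cluster-example/
  s3Credentials:
    accessKeyId:
      name: aws-creds
      key: ACCESS_KEY_ID
    secretAccessKey:
      name: aws-creds
      key: ACCESS_SECRET_KEY
  wal:
    compression: zstd
    maxParallel: 8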
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
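# Editor's note: a sketch of an externalClusters entry wiring together the
# connectionParameters and password fields described above; the host and
# Secret names are hypothetical.
---
externalClusters:
  - name: origin
    connectionParameters:
      host: pg-origin.example.com
      user: streaming_replica
      dbname: postgres
    password:
      name: origin-credentials
      key: password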
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslRootCert:
+ description: |-
+ The reference to an SSL CA public key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - name
+ type: object
+ type: array
+ failoverDelay:
+ default: 0
+ description: |-
+ The amount of time (in seconds) to wait before triggering a failover
+ after the primary PostgreSQL instance in the cluster was detected
+ to be unhealthy
+ format: int32
+ type: integer
+ imageCatalogRef:
+ description: Defines the major PostgreSQL version we want to use within
+ an ImageCatalog
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ major:
+ description: The major version of PostgreSQL we want to use from
+ the ImageCatalog
+ type: integer
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - major
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: Only image catalogs are supported
+ rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+ - message: Only image catalogs are supported
+ rule: self.apiGroup == 'postgresql.cnpg.io'
+ imageName:
+ description: |-
+ Name of the container image, supporting both tags (`<image>:<tag>`)
+ and digests for deterministic and repeatable deployments
+ (`<image>:<tag>@sha256:<digestValue>`)
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of `Always`, `Never` or `IfNotPresent`.
+ If not defined, it defaults to `IfNotPresent`.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ imagePullSecrets:
+ description: The list of pull secrets to be used to pull the images
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate a
+ local object with a known type inside the same namespace
+ properties:
+ name:
+ description: Name of the referent.
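# Editor's note: a sketch of imageCatalogRef, which pins the PostgreSQL major
# version to an entry of an ImageCatalog as validated above; the catalog name
# "postgresql" is an assumption.
---
imageCatalogRef:
  apiGroup: postgresql.cnpg.io
  kind: ImageCatalog
  name: postgresql
  major: 17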
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ inheritedMetadata:
+ description: Metadata that will be inherited by all objects related
+ to the Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ instances:
+ default: 1
+ description: Number of instances required in the cluster
+ minimum: 1
+ type: integer
+ livenessProbeTimeout:
+ description: |-
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ to successfully respond to the liveness probe (default 30).
+ The Liveness probe failure threshold is derived from this value using the formula:
+ ceiling(livenessProbe / 10).
+ format: int32
+ type: integer
+ logLevel:
+ default: info
+ description: 'The instances'' log level, one of the following values:
+ error, warning, info (default), debug, trace'
+ enum:
+ - error
+ - warning
+ - info
+ - debug
+ - trace
+ type: string
+ managed:
+ description: The configuration that is used by the portions of PostgreSQL
+ that are managed by the instance manager
+ properties:
+ roles:
+ description: Database roles managed by the `Cluster`
+ items:
+ description: |-
+ RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+ with the additional field Ensure specifying whether to ensure the presence or
+ absence of the role in the database
+
+ The defaults of the CREATE ROLE command are applied
+ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+ properties:
+ bypassrls:
+ description: |-
+ Whether a role bypasses every row-level security (RLS) policy.
+ Default is `false`.
+ type: boolean
+ comment:
+ description: Description of the role
+ type: string
+ connectionLimit:
+ default: -1
+ description: |-
+ If the role can log in, this specifies how many concurrent
+ connections the role can make. `-1` (the default) means no limit.
+ format: int64
+ type: integer
+ createdb:
+ description: |-
+ When set to `true`, the role being defined will be allowed to create
+ new databases. Specifying `false` (default) will deny a role the
+ ability to create databases.
+ type: boolean
+ createrole:
+ description: |-
+ Whether the role will be permitted to create, alter, drop, comment
+ on, change the security label for, and grant or revoke membership in
+ other roles. Default is `false`.
+ type: boolean
+ disablePassword:
+ description: DisablePassword indicates that a role's password
+ should be set to NULL in Postgres
+ type: boolean
+ ensure:
+ default: present
+ description: Ensure the role is `present` or `absent` -
+ defaults to "present"
+ enum:
+ - present
+ - absent
+ type: string
+ inRoles:
+ description: |-
+ List of one or more existing roles to which this role will be
+ immediately added as a new member. Default empty.
+ items:
+ type: string
+ type: array
+ inherit:
+ default: true
+ description: |-
+ Whether a role "inherits" the privileges of roles it is a member of.
+ Default is `true`.
+ type: boolean
+ login:
+ description: |-
+ Whether the role is allowed to log in. A role having the `login`
+ attribute can be thought of as a user. Roles without this attribute
+ are useful for managing database privileges, but are not users in
+ the usual sense of the word. Default is `false`.
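# Editor's note: a sketch of a declaratively managed role using the
# RoleConfiguration attributes listed above; the role name, comment, and
# memberships are illustrative assumptions.
---
managed:
  roles:
    - name: app_reader
      ensure: present
      comment: read-only application role
      login: true
      inherit: true
      connectionLimit: 10
      inRoles:
        - pg_read_all_data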
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present)
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+ superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ services:
+ description: Services managed by the `Cluster`
+ properties:
+ additional:
+ description: Additional is a list of additional managed services
+ specified by the user.
+ items:
+ description: |-
+ ManagedService represents a specific service managed by the cluster.
+ It includes the type of service and its associated template specification.
+ properties:
+ selectorType:
+ description: |-
+ SelectorType specifies the type of selectors that the service will have.
+ Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ serviceTemplate:
+ description: ServiceTemplate is the template specification
+ for the service.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only
+ supported for certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true".
It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it cannot be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be
+ ignored if the cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information
+ on service's port.
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
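# Editor's note: a sketch of an additional managed service built from the
# selectorType, serviceTemplate, and updateStrategy fields documented above;
# the service name and annotation are hypothetical.
---
managed:
  services:
    disabledDefaultServices:
      - ro
    additional:
      - selectorType: rw
        updateStrategy: patch
        serviceTemplate:
          metadata:
            name: cluster-example-rw-lb
            annotations:
              service.beta.kubernetes.io/aws-load-balancer-type: external
          spec:
            type: LoadBalancer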
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
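# Editor's note: a sketch of the monitoring stanza using the PodMonitor and
# RelabelConfig fields above to drop one metric family before ingestion; the
# regex is illustrative only.
---
monitoring:
  enablePodMonitor: true
  podMonitorMetricRelabelings:
    - sourceLabels:
        - __name__
      regex: cnpg_collector_.*
      action: drop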
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + extensions: + description: The configuration of the extensions to be added + items: + description: |- + ExtensionConfiguration is the configuration used to add + PostgreSQL extensions to the Cluster. + properties: + dynamic_library_path: + description: |- + The list of directories inside the image which should be added to dynamic_library_path. + If not defined, defaults to "/lib". + items: + type: string + type: array + extension_control_path: + description: |- + The list of directories inside the image which should be added to extension_control_path. + If not defined, defaults to "/share". + items: + type: string + type: array + image: + description: The image containing the extension, required + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. 
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + x-kubernetes-validations: + - message: An image reference is required + rule: has(self.reference) + ld_library_path: + description: The list of directories inside the image which + should be added to ld_library_path. + items: + type: string + type: array + name: + description: The name of the extension, required + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - image + - name + type: object + type: array + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 
'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. + type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). 
+ items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + isolationCheck: + description: |- + Configure the feature that extends the liveness probe for a primary + instance. In addition to the basic checks, this verifies whether the + primary is isolated from the Kubernetes API server and from its + replicas, ensuring that it can be safely shut down if network + partition or API unavailability is detected. Enabled by default. + properties: + connectionTimeout: + default: 1000 + description: Timeout in milliseconds for connections during + the primary isolation check + type: integer + enabled: + default: true + description: Whether primary isolation checking is enabled + for the liveness probe + type: boolean + requestTimeout: + default: 1000 + description: Timeout in milliseconds for requests during + the primary isolation check + type: integer + type: object + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
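
As a quick illustration of the `synchronous` block just defined, here is a sketch of quorum-based synchronous replication; note the CEL rule above, which forbids `dataDurability: preferred` whenever `standbyNamesPre` or `standbyNamesPost` is set:

    spec:
      instances: 3
      postgresql:
        synchronous:
          method: any                # quorum-based selection of standbys
          number: 1                  # transactions wait for one standby
          dataDurability: preferred  # valid only with empty standbyNamesPre/Post
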
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
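
The `isolationCheck` feature described above is specific to this operator's liveness probe. A sketch of tuning it under `.spec.probes.liveness`, with the timeouts left at their documented defaults:

    spec:
      probes:
        liveness:
          periodSeconds: 10
          failureThreshold: 3
          isolationCheck:
            enabled: true
            connectionTimeout: 1000   # milliseconds, schema default
            requestTimeout: 1000      # milliseconds, schema default
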
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
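
The `type` field above selects the probe strategy (`pg_isready`, `streaming`, or `query`), and `maximumLag` applies only to `streaming`. A sketch combining the readiness and startup probes; the 16Mi lag limit is an arbitrary example value:

    spec:
      probes:
        readiness:
          type: streaming
          maximumLag: 16Mi     # quantity, honored only by the streaming strategy
        startup:
          type: pg_isready
          failureThreshold: 30
          periodSeconds: 10
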
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
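
Per the `projectedVolumeTemplate` description above, these sources are mounted under the `/projected` base folder. A minimal sketch with a ConfigMap and a Secret source; both object names are hypothetical:

    spec:
      projectedVolumeTemplate:
        sources:
          - configMap:
              name: app-settings          # hypothetical ConfigMap
              items:
                - key: app.properties
                  path: app.properties    # lands at /projected/app.properties
          - secret:
              name: app-credentials       # hypothetical Secret
              optional: true
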
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. + pattern: ^[0-9a-z_]*$ + type: string + synchronizeLogicalDecoding: + description: |- + When enabled, the operator automatically manages synchronization of logical + decoding (replication) slots across high-availability clusters. 
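
The `replica` stanza supports the distributed topology described above, where `primary` and `self` are compared to decide this cluster's role. A sketch, assuming two hypothetical cluster names and a matching `externalClusters` entry defined elsewhere in the spec:

    spec:
      replica:
        primary: cluster-eu      # hypothetical name of the designated primary
        self: cluster-us         # this cluster's own name in the topology
        source: cluster-eu       # must match an externalClusters entry
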
+ + Requires one of the following conditions: + - PostgreSQL version 17 or later + - PostgreSQL version < 17 with pg_failover_slots extension enabled + type: boolean + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. 
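
`replicationSlots` and `resources` are sibling fields of the spec, so they can be sketched together. The exclude pattern and the resource figures below are illustrative only:

    spec:
      replicationSlots:
        highAvailability:
          enabled: true
          slotPrefix: _cnpg_     # schema default
        synchronizeReplicas:
          enabled: true
          excludePatterns:
            - "^manual_"         # hypothetical pattern
        updateInterval: 30
      resources:
        requests:
          cpu: "1"
          memory: 2Gi
        limits:
          memory: 2Gi
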
+ Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
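
Reading the three timing fields above together: `startDelay` drives the derived startup probe threshold, while the window left for a fast shutdown is `stopDelay - smartShutdownTimeout`. A sketch with the schema defaults made explicit:

    spec:
      startDelay: 3600            # startup failureThreshold = ceiling(3600 / 10)
      stopDelay: 1800
      smartShutdownTimeout: 180   # leaves 1620s for the fast shutdown to complete
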
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
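
Since `pvcTemplate` embeds a full PVC spec, the schema admits pre-populating volumes through `dataSource`. The following is only a sketch of the field's shape, with a hypothetical snapshot name; whether this fits a given workflow depends on the CSI driver and on the operator's own bootstrap mechanisms:

    spec:
      storage:
        pvcTemplate:
          dataSource:
            apiGroup: snapshot.storage.k8s.io
            kind: VolumeSnapshot
            name: cluster-example-snap    # hypothetical VolumeSnapshot
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 10Gi
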
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined, a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shut down during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source.
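
To ground the storage fields just listed, a sketch with an explicit size and class; `size` may grow but can never shrink, and both `fast-ssd` and the Secret name are hypothetical:

    spec:
      storage:
        size: 20Gi                 # re-applied to PVCs; cannot be decreased
        storageClass: fast-ssd     # hypothetical StorageClass
        resizeInUseVolumes: true   # schema default
      superuserSecret:
        name: cluster-superuser    # hypothetical Secret holding the password
      switchoverDelay: 600         # seconds allowed for the primary to shut down
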
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch.
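
A sketch of the `tablespaces` array just closed, with one regular and one temporary tablespace; the names, owner, and storage class are illustrative:

    spec:
      tablespaces:
        - name: analytics                 # hypothetical tablespace
          owner:
            name: app
          storage:
            size: 5Gi
        - name: scratch
          temporary: true                 # appended to temp_tablespaces
          storage:
            size: 2Gi
            storageClass: ephemeral-ssd   # hypothetical StorageClass
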
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. 
+ In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
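
Applying the constraint fields above to this operator's pods, a sketch that spreads instances across zones; `cnpg.io/cluster` is the label the operator applies to its pods, while the cluster name itself is hypothetical:

    spec:
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: DoNotSchedule
          labelSelector:
            matchLabels:
              cnpg.io/cluster: cluster-example   # hypothetical cluster name
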
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existing PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate.
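# Editor's note: an illustrative (not normative) use of the walStorage stanza defined above,
# placing WAL files on a separate, hypothetical storage class. `size` is required here because
# no pvcTemplate provides it, and in-use PVCs are grown in place since resizeInUseVolumes
# defaults to true:
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-separate-wal
spec:
  instances: 3
  storage:
    size: 10Gi               # PGDATA volume
  walStorage:
    size: 2Gi                # dedicated WAL volume; cannot be decreased later
    storageClass: fast-ssd   # hypothetical StorageClass name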
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
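# Editor's note: a hedged sketch of how user-provided certificate secrets can replace the
# operator-generated ones; the spec-level certificates stanza mirrors the status block documented
# above. Secret names are hypothetical and must exist beforehand (the server CA secret carrying
# ca.crt, plus ca.key only when no serverTLSSecret is given):
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-custom-tls
spec:
  instances: 3
  storage:
    size: 1Gi
  certificates:
    serverCASecret: my-server-ca     # hypothetical secret with ca.crt (and ca.key if needed)
    serverTLSSecret: my-server-tls   # hypothetical kubernetes.io/tls secret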
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must also provide `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash of the operator that is running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod. + + Deprecated: the field is not set for backup plugins. + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: |- + The first recoverability point, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. + type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: |- + Last failed backup, stored as a date in RFC3339 format. + + Deprecated: the field is not set for backup plugins. + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format. + This field is calculated from the content of LastSuccessfulBackupByMethod. 
+ + Deprecated: the field is not set for backup plugins. + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: |- + Last successful backup, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. + properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + 
poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. 
+ type: boolean + type: object + systemID: + description: SystemID is the latest detected PostgreSQL SystemID + type: string + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance; this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object.
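# Editor's note: because the Cluster CRD above declares a scale subresource mapped to
# .spec.instances, standard Kubernetes scaling tooling should work against it; for instance,
# with a hypothetical Cluster named "cluster-example":
#
#   kubectl scale cluster cluster-example --replicas=5
#
# This updates .spec.instances, and the operator reconciles pods and PVCs to the new count.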
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. 
+ enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. 
+ type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
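# Editor's note: a hypothetical Database object exercising the spec fields above. The referenced
# Cluster and owner role must already exist, and the extension is assumed to ship with the
# PostgreSQL image in use:
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  name: db-one
spec:
  cluster:
    name: cluster-example   # hosting Cluster (required)
  name: one                 # database name inside PostgreSQL; immutable
  owner: app                # owning role (required)
  extensions:
    - name: pg_stat_statements
      ensure: present
  schemas:
    - name: reporting
      owner: app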
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + extensions: + description: Extensions is the status of the managed extensions + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True if the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + schemas: + description: Schemas is the status of the managed schemas + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True if the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: failoverquorums.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: FailoverQuorum + listKind: FailoverQuorumList + plural: failoverquorums + singular: failoverquorum + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + FailoverQuorum contains the information about the current failover + quorum status of a PG cluster. It is updated by the instance manager + of the primary node and reset to zero by the operator to trigger + an update. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + description: Most recently observed status of the failover quorum. + properties: + method: + description: Contains the latest reported Method value. + type: string + primary: + description: |- + Primary is the name of the primary instance that updated + this object most recently. + type: string + standbyNames: + description: |- + StandbyNames is the list of potentially synchronous + instance names.
+ items: + type: string + type: array + standbyNumber: + description: |- + StandbyNumber is the number of synchronous standbys that transactions + need to wait for replies from. + type: integer + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. + minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
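# Editor's note: a sketch of an ImageCatalog conforming to the schema above; the image tags are
# hypothetical, and each PostgreSQL major version must be unique within the catalog:
apiVersion: postgresql.cnpg.io/v1
kind: ImageCatalog
metadata:
  name: postgresql-catalog
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16.6   # hypothetical tag
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17.2   # hypothetical tag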
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. 
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. 
+ type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. + type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. 
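# Editor's note: a hypothetical Pooler combining the pgbouncer and monitoring fields above; the
# cluster name, parameter value, and dropped metric pattern are illustrative assumptions:
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-rw
spec:
  cluster:
    name: cluster-example    # must not collide with any Cluster name in the namespace
  instances: 2
  type: rw                   # see the .spec.type printer column above
  pgbouncer:
    poolMode: transaction
    parameters:
      max_client_conn: "500" # forwarded to the PgBouncer configuration
  monitoring:
    enablePodMonitor: true
    podMonitorMetricRelabelings:
      - action: drop                   # drop a (hypothetically) noisy series before ingestion
        sourceLabels: ["__name__"]
        regex: pgbouncer_sessions_.*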
Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as described above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as described above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. 
If this field is specified + manually, and the requested family is available in the cluster + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature. + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic
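+ # Illustrative sketch (not part of the generated schema): the pgbouncer and
+ # serviceTemplate fields documented in this CRD could be combined in a Pooler
+ # manifest as follows. The resource names, PgBouncer parameter values, and
+ # CIDR range below are hypothetical.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Pooler
+ #   metadata:
+ #     name: pooler-example-rw
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     instances: 3
+ #     type: rw
+ #     pgbouncer:
+ #       poolMode: transaction
+ #       parameters:           # map of string keys to string values
+ #         max_client_conn: "1000"
+ #         default_pool_size: "10"
+ #     serviceTemplate:
+ #       spec:
+ #         type: LoadBalancer
+ #         loadBalancerSourceRanges:   # restrict client IPs, if the platform supports it
+ #           - 10.0.0.0/8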
+ ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use, it will be used; otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. 
+ "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic
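+ # Illustrative sketch (not part of the generated schema): a node selector
+ # term combining the operators described above. The label keys and values
+ # here are hypothetical.
+ #
+ #   matchExpressions:
+ #     - key: topology.kubernetes.io/zone
+ #       operator: In
+ #       values: ["zone-a", "zone-b"]
+ #     - key: example.com/cpu-count
+ #       operator: Gt          # single value, interpreted as an integer
+ #       values: ["4"]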
+ matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values.
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty.
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch.
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch.
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. 
+ Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. 
+ Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. 
+ properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. 
+ Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
+ The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
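+                              # Example (illustrative sketch only, not part of the generated
+                              # schema; the values are hypothetical and the restartPolicy
+                              # values are assumed from the upstream Kubernetes
+                              # ContainerResizePolicy API). A container resize policy in a
+                              # pod template might look like:
+                              #
+                              #   resizePolicy:
+                              #   - resourceName: cpu
+                              #     restartPolicy: NotRequired
+                              #   - resourceName: memory
+                              #     restartPolicy: RestartContainer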
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. 
Although this init
+                              container still starts in the init container sequence, it does not wait
+                              for the container to complete before proceeding to the next init
+                              container. Instead, the next init container starts immediately after this
+                              init container is started, or after any startupProbe has successfully
+                              completed.
+                            type: string
+                          securityContext:
+                            description: |-
+                              SecurityContext defines the security options the container should be run with.
+                              If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+                              More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+                            properties:
+                              allowPrivilegeEscalation:
+                                description: |-
+                                  AllowPrivilegeEscalation controls whether a process can gain more
+                                  privileges than its parent process. This bool directly controls if
+                                  the no_new_privs flag will be set on the container process.
+                                  AllowPrivilegeEscalation is true always when the container is:
+                                  1) run as Privileged
+                                  2) has CAP_SYS_ADMIN
+                                  Note that this field cannot be set when spec.os.name is windows.
+                                type: boolean
+                              appArmorProfile:
+                                description: |-
+                                  appArmorProfile is the AppArmor options to use by this container. If set, this profile
+                                  overrides the pod's appArmorProfile.
+                                  Note that this field cannot be set when spec.os.name is windows.
+                                properties:
+                                  localhostProfile:
+                                    description: |-
+                                      localhostProfile indicates a profile loaded on the node that should be used.
+                                      The profile must be preconfigured on the node to work.
+                                      Must match the loaded name of the profile.
+                                      Must be set if and only if type is "Localhost".
+                                    type: string
+                                  type:
+                                    description: |-
+                                      type indicates which kind of AppArmor profile will be applied.
+                                      Valid options are:
+                                      Localhost - a profile pre-loaded on the node.
+                                      RuntimeDefault - the container runtime's default profile.
+                                      Unconfined - no AppArmor enforcement.
+                                    type: string
+                                required:
+                                - type
+                                type: object
+                              capabilities:
+                                description: |-
+                                  The capabilities to add/drop when running containers.
+                                  Defaults to the default set of capabilities granted by the container runtime.
+                                  Note that this field cannot be set when spec.os.name is windows.
+                                properties:
+                                  add:
+                                    description: Added capabilities
+                                    items:
+                                      description: Capability represents a POSIX
+                                        capabilities type
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  drop:
+                                    description: Removed capabilities
+                                    items:
+                                      description: Capability represents a POSIX
+                                        capabilities type
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                type: object
+                              privileged:
+                                description: |-
+                                  Run container in privileged mode.
+                                  Processes in privileged containers are essentially equivalent to root on the host.
+                                  Defaults to false.
+                                  Note that this field cannot be set when spec.os.name is windows.
+                                type: boolean
+                              procMount:
+                                description: |-
+                                  procMount denotes the type of proc mount to use for the containers.
+                                  The default value is Default which uses the container runtime defaults for
+                                  readonly paths and masked paths.
+                                  This requires the ProcMountType feature flag to be enabled.
+                                  Note that this field cannot be set when spec.os.name is windows.
+                                type: string
+                              readOnlyRootFilesystem:
+                                description: |-
+                                  Whether this container has a read-only root filesystem.
+                                  Default is false.
+                                  Note that this field cannot be set when spec.os.name is windows.
+                                type: boolean
+                              runAsGroup:
+                                description: |-
+                                  The GID to run the entrypoint of the container process.
+                                  Uses runtime default if unset.
+                                  May also be set in PodSecurityContext.
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                              first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                              at which time stdin is closed and remains closed until the container is restarted. If this
+                              flag is false, a container process that reads from stdin will never receive an EOF.
+                              Default is false.
+                            type: boolean
+                          terminationMessagePath:
+                            description: |-
+                              Optional: Path at which the file to which the container's termination message
+                              will be written is mounted into the container's filesystem.
+                              Message written is intended to be brief final status, such as an assertion failure message.
+                              Will be truncated by the node if greater than 4096 bytes. The total message length across
+                              all containers will be limited to 12kb.
+                              Defaults to /dev/termination-log.
+                              Cannot be updated.
+                            type: string
+                          terminationMessagePolicy:
+                            description: |-
+                              Indicates how the termination message should be populated. File will use the contents of
+                              terminationMessagePath to populate the container status message on both success and failure.
+                              FallbackToLogsOnError will use the last chunk of container log output if the termination
+                              message file is empty and the container exited with an error.
+                              The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                              Defaults to File.
+                              Cannot be updated.
+                            type: string
+                          tty:
+                            description: |-
+                              Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                              Default is false.
+                            type: boolean
+                          volumeDevices:
+                            description: volumeDevices is the list of block devices
+                              to be used by the container.
+                            items:
+                              description: volumeDevice describes a mapping of a
+                                raw block device within a container.
+                              properties:
+                                devicePath:
+                                  description: devicePath is the path inside of
+                                    the container that the device will be mapped
+                                    to.
+                                  type: string
+                                name:
+                                  description: name must match the name of a persistentVolumeClaim
+                                    in the pod
+                                  type: string
+                              required:
+                              - devicePath
+                              - name
+                              type: object
+                            type: array
+                            x-kubernetes-list-map-keys:
+                            - devicePath
+                            x-kubernetes-list-type: map
+                          volumeMounts:
+                            description: |-
+                              Pod volumes to mount into the container's filesystem.
+                              Cannot be updated.
+                            items:
+                              description: VolumeMount describes a mounting of a
+                                Volume within a container.
+                              properties:
+                                mountPath:
+                                  description: |-
+                                    Path within the container at which the volume should be mounted. Must
+                                    not contain ':'.
+                                  type: string
+                                mountPropagation:
+                                  description: |-
+                                    mountPropagation determines how mounts are propagated from the host
+                                    to container and the other way around.
+                                    When not set, MountPropagationNone is used.
+                                    This field is beta in 1.10.
+                                    When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+                                    (which defaults to None).
+                                  type: string
+                                name:
+                                  description: This must match the Name of a Volume.
+                                  type: string
+                                readOnly:
+                                  description: |-
+                                    Mounted read-only if true, read-write otherwise (false or unspecified).
+                                    Defaults to false.
+                                  type: boolean
+                                recursiveReadOnly:
+                                  description: |-
+                                    RecursiveReadOnly specifies whether read-only mounts should be handled
+                                    recursively.
+
+                                    If ReadOnly is false, this field has no meaning and must be unspecified.
+
+                                    If ReadOnly is true, and this field is set to Disabled, the mount is not made
+                                    recursively read-only. If this field is set to IfPossible, the mount is made
+                                    recursively read-only, if it is supported by the container runtime.
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. 
+
+                          If the OS field is set to linux, the following fields must be unset:
+                          - securityContext.windowsOptions
+
+                          If the OS field is set to windows, the following fields must be unset:
+                          - spec.hostPID
+                          - spec.hostIPC
+                          - spec.hostUsers
+                          - spec.securityContext.appArmorProfile
+                          - spec.securityContext.seLinuxOptions
+                          - spec.securityContext.seccompProfile
+                          - spec.securityContext.fsGroup
+                          - spec.securityContext.fsGroupChangePolicy
+                          - spec.securityContext.sysctls
+                          - spec.shareProcessNamespace
+                          - spec.securityContext.runAsUser
+                          - spec.securityContext.runAsGroup
+                          - spec.securityContext.supplementalGroups
+                          - spec.securityContext.supplementalGroupsPolicy
+                          - spec.containers[*].securityContext.appArmorProfile
+                          - spec.containers[*].securityContext.seLinuxOptions
+                          - spec.containers[*].securityContext.seccompProfile
+                          - spec.containers[*].securityContext.capabilities
+                          - spec.containers[*].securityContext.readOnlyRootFilesystem
+                          - spec.containers[*].securityContext.privileged
+                          - spec.containers[*].securityContext.allowPrivilegeEscalation
+                          - spec.containers[*].securityContext.procMount
+                          - spec.containers[*].securityContext.runAsUser
+                          - spec.containers[*].securityContext.runAsGroup
+                        properties:
+                          name:
+                            description: |-
+                              Name is the name of the operating system. The currently supported values are linux and windows.
+                              Additional values may be defined in the future and can be one of:
+                              https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+                              Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+                            type: string
+                        required:
+                        - name
+                        type: object
+                      overhead:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+                          This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+                          the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+                          The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+                          set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+                          defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+                          More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+                        type: object
+                      preemptionPolicy:
+                        description: |-
+                          PreemptionPolicy is the Policy for preempting pods with lower priority.
+                          One of Never, PreemptLowerPriority.
+                          Defaults to PreemptLowerPriority if unset.
+                        type: string
+                      priority:
+                        description: |-
+                          The priority value. Various system components use this field to find the
+                          priority of the pod. When Priority Admission Controller is enabled, it
+                          prevents users from setting this field. The admission controller populates
+                          this field from PriorityClassName.
+                          The higher the value, the higher the priority.
+                        format: int32
+                        type: integer
+                      priorityClassName:
+                        description: |-
+                          If specified, indicates the pod's priority. "system-node-critical" and
+                          "system-cluster-critical" are two special keywords which indicate the
+                          highest priorities with the former being the highest priority. Any other
+                          name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
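+                          # Example (illustrative sketch only, not part of the generated
+                          # schema; the quantities are hypothetical). Pod-level resources,
+                          # which require the PodLevelResources feature gate per the
+                          # description above, might be declared as:
+                          #
+                          #   resources:
+                          #     requests:
+                          #       cpu: 500m
+                          #       memory: 512Mi
+                          #     limits:
+                          #       cpu: "1"
+                          #       memory: 1Gi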
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. 
See type description for default values of each field.
+                        properties:
+                          appArmorProfile:
+                            description: |-
+                              appArmorProfile is the AppArmor options to use by the containers in this pod.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            properties:
+                              localhostProfile:
+                                description: |-
+                                  localhostProfile indicates a profile loaded on the node that should be used.
+                                  The profile must be preconfigured on the node to work.
+                                  Must match the loaded name of the profile.
+                                  Must be set if and only if type is "Localhost".
+                                type: string
+                              type:
+                                description: |-
+                                  type indicates which kind of AppArmor profile will be applied.
+                                  Valid options are:
+                                  Localhost - a profile pre-loaded on the node.
+                                  RuntimeDefault - the container runtime's default profile.
+                                  Unconfined - no AppArmor enforcement.
+                                type: string
+                            required:
+                            - type
+                            type: object
+                          fsGroup:
+                            description: |-
+                              A special supplemental group that applies to all containers in a pod.
+                              Some volume types allow the Kubelet to change the ownership of that volume
+                              to be owned by the pod:
+
+                              1. The owning GID will be the FSGroup
+                              2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+                              3. The permission bits are OR'd with rw-rw----
+
+                              If unset, the Kubelet will not modify the ownership and permissions of any volume.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            format: int64
+                            type: integer
+                          fsGroupChangePolicy:
+                            description: |-
+                              fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+                              before being exposed inside Pod. This field will only apply to
+                              volume types which support fsGroup based ownership (and permissions).
+                              It will have no effect on ephemeral volume types such as: secret, configmaps
+                              and emptydir.
+                              Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            type: string
+                          runAsGroup:
+                            description: |-
+                              The GID to run the entrypoint of the container process.
+                              Uses runtime default if unset.
+                              May also be set in SecurityContext. If set in both SecurityContext and
+                              PodSecurityContext, the value specified in SecurityContext takes precedence
+                              for that container.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            format: int64
+                            type: integer
+                          runAsNonRoot:
+                            description: |-
+                              Indicates that the container must run as a non-root user.
+                              If true, the Kubelet will validate the image at runtime to ensure that it
+                              does not run as UID 0 (root) and fail to start the container if it does.
+                              If unset or false, no such validation will be performed.
+                              May also be set in SecurityContext. If set in both SecurityContext and
+                              PodSecurityContext, the value specified in SecurityContext takes precedence.
+                            type: boolean
+                          runAsUser:
+                            description: |-
+                              The UID to run the entrypoint of the container process.
+                              Defaults to user specified in image metadata if unspecified.
+                              May also be set in SecurityContext. If set in both SecurityContext and
+                              PodSecurityContext, the value specified in SecurityContext takes precedence
+                              for that container.
+                              Note that this field cannot be set when spec.os.name is windows.
+                            format: int64
+                            type: integer
+                          seLinuxChangePolicy:
+                            description: |-
+                              seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+                              It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+                              Valid values are "MountOption" and "Recursive".
+ + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. 
+ If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). 
+                          In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+                          If a pod does not have FQDN, this has no effect.
+                          Default to false.
+                        type: boolean
+                      shareProcessNamespace:
+                        description: |-
+                          Share a single process namespace between all of the containers in a pod.
+                          When this is set containers will be able to view and signal processes from other containers
+                          in the same pod, and the first process in each container will not be assigned PID 1.
+                          HostPID and ShareProcessNamespace cannot both be set.
+                          Optional: Default to false.
+                        type: boolean
+                      subdomain:
+                        description: |-
+                          If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+                          If not specified, the pod will not have a domainname at all.
+                        type: string
+                      terminationGracePeriodSeconds:
+                        description: |-
+                          Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+                          Value must be non-negative integer. The value zero indicates stop immediately via
+                          the kill signal (no opportunity to shut down).
+                          If this value is nil, the default grace period will be used instead.
+                          The grace period is the duration in seconds after the processes running in the pod are sent
+                          a termination signal and the time when the processes are forcibly halted with a kill signal.
+                          Set this value longer than the expected cleanup time for your process.
+                          Defaults to 30 seconds.
+                        format: int64
+                        type: integer
+                      tolerations:
+                        description: If specified, the pod's tolerations.
+                        items:
+                          description: |-
+                            The pod this Toleration is attached to tolerates any taint that matches
+                            the triple <key,value,effect> using the matching operator <operator>.
+                          properties:
+                            effect:
+                              description: |-
+                                Effect indicates the taint effect to match. Empty means match all taint effects.
+                                When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                              type: string
+                            key:
+                              description: |-
+                                Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                                If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                              type: string
+                            operator:
+                              description: |-
+                                Operator represents a key's relationship to the value.
+                                Valid operators are Exists and Equal. Defaults to Equal.
+                                Exists is equivalent to wildcard for value, so that a pod can
+                                tolerate all taints of a particular category.
+                              type: string
+                            tolerationSeconds:
+                              description: |-
+                                TolerationSeconds represents the period of time the toleration (which must be
+                                of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                                it is not set, which means tolerate the taint forever (do not evict). Zero and
+                                negative values will be treated as 0 (evict immediately) by the system.
+                              format: int64
+                              type: integer
+                            value:
+                              description: |-
+                                Value is the taint value the toleration matches to.
+                                If the operator is Exists, the value should be empty, otherwise just a regular string.
+                              type: string
+                          type: object
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      topologySpreadConstraints:
+                        description: |-
+                          TopologySpreadConstraints describes how a group of pods ought to spread across topology
+                          domains. Scheduler will schedule pods in a way which abides by the constraints.
+                          All topologySpreadConstraints are ANDed.
+                        items:
+                          description: TopologySpreadConstraint specifies how to spread
+                            matching pods among the given topology.
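+                          # Example (illustrative sketch only, not part of the generated
+                          # schema; the app label is hypothetical). Spreading a workload's
+                          # pods evenly across zones, using the topology.kubernetes.io/zone
+                          # key mentioned in the field documentation, might look like:
+                          #
+                          #   topologySpreadConstraints:
+                          #   - maxSkew: 1
+                          #     topologyKey: topology.kubernetes.io/zone
+                          #     whenUnsatisfiable: DoNotSchedule
+                          #     labelSelector:
+                          #       matchLabels:
+                          #         app: my-app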
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
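+ # Illustrative sketch only (not generated from the schema): how an entry
+ # of the volumes list documented above might look; the volume name
+ # "scratch" and the 1Gi limit are assumptions, and emptyDir is simply the
+ # smallest volume source covered in this section.
+ #
+ #   volumes:
+ #   - name: scratch
+ #     emptyDir:
+ #       sizeLimit: 1Gi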
+ type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. 
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of
+ the pod: only annotations, labels, name,
+ namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
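+ # Hand-written sketch (not part of the generated schema): a downwardAPI
+ # volume item that projects the pod's labels into a file named "labels"
+ # via the fieldRef/fieldPath mechanism documented here.
+ #
+ #   downwardAPI:
+ #     items:
+ #     - path: labels
+ #       fieldRef:
+ #         fieldPath: metadata.labels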
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. 
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+ properties:
+ datasetName:
+ description: |-
+ datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker;
+ it should be considered deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset.
+ This is the unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is the unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and with non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact.
Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether to support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether to support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon
+ Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
+ properties:
+ fsType:
+ description: |-
+ fsType represents the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all-in-one resources:
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
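+ # Illustrative sketch (not generated): a projected volume combining two of
+ # the sources documented above; "app-config" is an assumed ConfigMap name,
+ # and mode 420 is the decimal form of octal 0644 that JSON requires.
+ #
+ #   projected:
+ #     sources:
+ #     - configMap:
+ #         name: app-config
+ #     - downwardAPI:
+ #         items:
+ #         - path: namespace
+ #           fieldRef:
+ #             fieldPath: metadata.namespace
+ #           mode: 420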
+ format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. 
A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
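+ # Illustrative example (kept as YAML comments so the generated manifest
+ # above is not altered): a minimal Pooler consistent with this schema.
+ # `cluster` and `pgbouncer` are required, `type` defaults to `rw`, and
+ # `.spec.instances` backs the scale subresource declared just below. The
+ # names `pooler-example-rw` and `cluster-example` are hypothetical.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: Pooler
+ # metadata:
+ #   name: pooler-example-rw
+ # spec:
+ #   cluster:
+ #     name: cluster-example
+ #   instances: 1
+ #   type: rw
+ #   pgbouncer:
+ #     poolMode: session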
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
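+ # Illustrative example (kept as YAML comments so the generated schema is
+ # not altered): a minimal Publication that uses `allTables`, which the
+ # validation rules below make mutually exclusive with `objects`. The
+ # resource, cluster, and database names are hypothetical.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: Publication
+ # metadata:
+ #   name: publication-example
+ # spec:
+ #   cluster:
+ #     name: cluster-example
+ #   dbname: app
+ #   name: pub_all
+ #   target:
+ #     allTables: true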
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: Whether the first backup has to start immediately
+ after creation
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: Whether this backup is suspended
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup.
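+ # Illustrative example (kept as YAML comments so the generated schema is
+ # not altered): a minimal ScheduledBackup. Note the six-field cron
+ # expression with the extra seconds specifier described above; this one
+ # fires daily at midnight. All names are hypothetical.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: ScheduledBackup
+ # metadata:
+ #   name: backup-example
+ # spec:
+ #   schedule: "0 0 0 * * *"
+ #   backupOwnerReference: self
+ #   cluster:
+ #     name: cluster-example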
This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: The last time a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: The next time a backup is scheduled to run
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.18.0
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+ The name of the database where the subscription will be installed in
+ the "subscriber" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ externalClusterName:
+ description: The name of the external cluster with the publication
+ ("publisher")
+ type: string
+ name:
+ description: The name of the subscription inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Subscription parameters included in the `WITH` clause of the PostgreSQL
+ `CREATE SUBSCRIPTION` command.
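+ # Illustrative example (kept as YAML comments so the generated schema is
+ # not altered): a minimal Subscription. `externalClusterName` (defined
+ # above) is assumed to match an entry in the subscriber Cluster's
+ # `externalClusters` that points at the publisher. All names are
+ # hypothetical.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: Subscription
+ # metadata:
+ #   name: subscription-example
+ # spec:
+ #   cluster:
+ #     name: cluster-dest
+ #   dbname: app
+ #   name: sub_all
+ #   publicationName: pub_all
+ #   externalClusterName: cluster-source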
Most parameters cannot be changed + after the subscription is created and will be ignored if modified + later, except for a limited set documented at: + https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - failoverquorums/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - failoverquorums + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- 
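+ # Illustrative example (kept as YAML comments so the generated manifest
+ # is not altered): the editor/viewer ClusterRoles above ship without
+ # bindings, so granting one is a standard RBAC step. A namespaced
+ # RoleBinding that references a ClusterRole limits the grant to that
+ # namespace. The namespace and user names are hypothetical.
+ #
+ # apiVersion: rbac.authorization.k8s.io/v1
+ # kind: RoleBinding
+ # metadata:
+ #   name: subscription-editor-jane
+ #   namespace: app-namespace
+ # roleRef:
+ #   apiGroup: rbac.authorization.k8s.io
+ #   kind: ClusterRole
+ #   name: cnpg-subscription-editor-role
+ # subjects:
+ # - apiGroup: rbac.authorization.k8s.io
+ #   kind: User
+ #   name: jane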
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on 
epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT 
checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: 
"COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , 
COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - 
--max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0-rc1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0-rc1 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: 
/validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/cnpg-1.27.0.yaml b/releases/cnpg-1.27.0.yaml new file mode 100644 index 0000000000..19397ae0bf --- /dev/null +++ b/releases/cnpg-1.27.0.yaml @@ -0,0 +1,18219 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + x-kubernetes-validations: + - message: BackupSpec is immutable once set + rule: oldSelf == self + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. 
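# A minimal Backup request exercising the spec schema above: a sketch,
# assuming a Cluster named "cluster-example" already exists (the name is
# hypothetical).
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-example
spec:
  cluster:
    name: cluster-example  # required reference to the target Cluster
  method: volumeSnapshot   # default is barmanObjectStore; plugin also allowed
  online: false            # take an offline/cold snapshot backup
  target: primary          # or prefer-standby; empty falls back to the cluster default
# Note: the CEL rule above ("oldSelf == self") makes the spec immutable once set.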
Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required by the S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3; the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
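# A ClusterImageCatalog satisfying the schema above: a sketch with
# illustrative image tags. The CEL rule
# self.all(e, self.filter(f, f.major==e.major).size() == 1)
# rejects catalogs that list the same PostgreSQL major version twice.
apiVersion: postgresql.cnpg.io/v1
kind: ClusterImageCatalog  # cluster-scoped, so no namespace
metadata:
  name: postgresql
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16  # tag is illustrative
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17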
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. 
+ This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a map of key-value pairs used to define the nodes on which + the pods can run. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
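# How the affinity knobs documented above surface in a Cluster spec: a
# sketch with hypothetical names and labels. With podAntiAffinityType set
# to "required", instances stay Pending rather than share a node.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  affinity:
    enablePodAntiAffinity: true    # operator-generated anti-affinity (default)
    podAntiAffinityType: required  # "preferred" is the default
    topologyKey: kubernetes.io/hostname
    nodeSelector:
      workload: postgres           # hypothetical node label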
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. 
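# Wiring the azureCredentials stanza above to a Secret: a sketch in which
# the Secret name, its keys, and the destination URL are all hypothetical.
# (Fragment of a Cluster .spec.)
  backup:
    barmanObjectStore:
      destinationPath: https://storageaccount.blob.core.windows.net/backups/
      azureCredentials:
        storageAccount:
          name: azure-creds           # hypothetical Secret
          key: AZURE_STORAGE_ACCOUNT
        storageKey:
          name: azure-creds
          key: AZURE_STORAGE_KEY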
+ enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA stores the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The reference to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
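# The S3 flavour of the same stanza, combining s3Credentials with the
# `data` tuning fields documented above: a sketch; bucket, endpoint, and
# Secret names are hypothetical. (Fragment of a Cluster .spec.)
  backup:
    barmanObjectStore:
      destinationPath: s3://backups/cluster-example
      endpointURL: https://s3.example.com  # optional: overrides endpoint discovery
      s3Credentials:
        accessKeyId:
          name: aws-creds                  # hypothetical Secret
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: aws-creds
          key: ACCESS_SECRET_KEY
      data:
        compression: gzip                  # or bzip2 / snappy; empty means none
        jobs: 4                            # parallel upload jobs, default 2
        immediateCheckpoint: true          # faster start at the cost of an I/O spike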
+ type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. 
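+          # A minimal sketch (not part of the schema): a Cluster `backup`
+          # stanza combining the object store and retention settings above.
+          # The bucket path, secret name, and retention value are hypothetical.
+          #
+          #   backup:
+          #     retentionPolicy: "30d"
+          #     barmanObjectStore:
+          #       destinationPath: s3://my-bucket/backups
+          #       s3Credentials:
+          #         accessKeyId:
+          #           name: aws-creds
+          #           key: ACCESS_KEY_ID
+          #         secretAccessKey:
+          #           name: aws-creds
+          #           key: ACCESS_SECRET_KEY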
+ pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. + type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. 
+ This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. + type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. 
+ Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. 
Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+                - `ca.key`: key used to generate client certificates; it can be
+                omitted if `ReplicationTLSSecret` is provided.
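+          # A minimal sketch (not part of the schema): supplying a pre-created
+          # client CA through `clientCASecret`. Secret names are hypothetical;
+          # the secret must contain the `ca.crt` and `ca.key` keys described above.
+          #
+          #   certificates:
+          #     clientCASecret: my-client-ca
+          #     replicationTLSSecret: my-replication-tls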
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+                - `ca.key`: key used to generate server SSL certificates; it can be
+                omitted if `ServerTLSSecret` is provided.
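+          # A minimal sketch (not part of the schema): the server-side
+          # counterpart, with hypothetical secret names and DNS name.
+          #
+          #   certificates:
+          #     serverCASecret: my-server-ca
+          #     serverTLSSecret: my-server-tls
+          #     serverAltDNSNames:
+          #       - db.example.com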
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
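+            # A minimal sketch (not part of the schema): enabling superuser
+            # access and injecting an environment variable from a Secret,
+            # using the `env` format above. Names are hypothetical.
+            #
+            #   enableSuperuserAccess: true
+            #   env:
+            #     - name: AWS_REGION
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: aws-creds
+            #           key: REGION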
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name of each environment + variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. 
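+        # A minimal sketch (not part of the schema): an ephemeral volume
+        # claim template for temporary data, following the PVC template
+        # fields below. Storage class and size are illustrative assumptions.
+        #
+        #   ephemeralVolumeSource:
+        #     volumeClaimTemplate:
+        #       spec:
+        #         accessModes: ["ReadWriteOnce"]
+        #         storageClassName: standard
+        #         resources:
+        #           requests:
+        #             storage: 1Gi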
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. 
+ This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
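+                    # A minimal sketch (not part of the schema): appending a flag
+                    # to the barman-cloud-backup invocation. The flag is shown for
+                    # illustration only; verify it against your barman-cloud version.
+                    #
+                    #   data:
+                    #     additionalCommandArgs:
+                    #       - "--min-chunk-size=5MB"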
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
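+                    # A minimal sketch (not part of the schema): WAL archiving
+                    # with compression and parallelism, using the fields above.
+                    # The chosen values are illustrative.
+                    #
+                    #   wal:
+                    #     compression: zstd
+                    #     maxParallel: 8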
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
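+      # A minimal sketch (not part of the schema): selecting a PostgreSQL
+      # major version through the `imageCatalogRef` described above; the
+      # catalog name is hypothetical.
+      #
+      #   imageCatalogRef:
+      #     apiGroup: postgresql.cnpg.io
+      #     kind: ImageCatalog
+      #     name: postgresql
+      #     major: 16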
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
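+          # Editor's example (illustrative only, not generated): the
+          # `instances`, `logLevel` and `inheritedMetadata` fields described
+          # above; the label key and value are invented.
+          #
+          #   spec:
+          #     instances: 3
+          #     logLevel: debug
+          #     inheritedMetadata:
+          #       labels:
+          #         app.example.com/tier: database
+          # --- end example ---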
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
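+          # Editor's example (illustrative only, not generated): a
+          # declaratively managed role combining the attributes described
+          # above. The role name and secret name are invented;
+          # pg_read_all_data is a standard PostgreSQL predefined role.
+          #
+          #   spec:
+          #     managed:
+          #       roles:
+          #       - name: app_reader
+          #         ensure: present
+          #         login: true
+          #         inherit: true
+          #         connectionLimit: 10
+          #         inRoles:
+          #         - pg_read_all_data
+          #         passwordSecret:
+          #           name: app-reader-password
+          # --- end example ---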
+                                    It
+                                    may be set to "false" if the cluster load-balancer does not rely on
+                                    NodePorts. If the caller requests specific NodePorts (by specifying a
+                                    value), those requests will be respected, regardless of this field.
+                                    This field may only be set for services with type LoadBalancer and will
+                                    be cleared if the type is changed to any other type.
+                                  type: boolean
+                                clusterIP:
+                                  description: |-
+                                    clusterIP is the IP address of the service and is usually assigned
+                                    randomly. If an address is specified manually, is in-range (as per
+                                    system configuration), and is not in use, it will be allocated to the
+                                    service; otherwise creation of the service will fail. This field may not
+                                    be changed through updates unless the type field is also being changed
+                                    to ExternalName (which requires this field to be blank) or the type
+                                    field is being changed from ExternalName (in which case this field may
+                                    optionally be specified, as described above). Valid values are "None",
+                                    empty string (""), or a valid IP address. Setting this to "None" makes a
+                                    "headless service" (no virtual IP), which is useful when direct endpoint
+                                    connections are preferred and proxying is not required. Only applies to
+                                    types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                                    when creating a Service of type ExternalName, creation will fail. This
+                                    field will be wiped when updating a Service to type ExternalName.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  type: string
+                                clusterIPs:
+                                  description: |-
+                                    ClusterIPs is a list of IP addresses assigned to this service, and are
+                                    usually assigned randomly. If an address is specified manually, is
+                                    in-range (as per system configuration), and is not in use, it will be
+                                    allocated to the service; otherwise creation of the service will fail.
+                                    This field may not be changed through updates unless the type field is
+                                    also being changed to ExternalName (which requires this field to be
+                                    empty) or the type field is being changed from ExternalName (in which
+                                    case this field may optionally be specified, as described above). Valid
+                                    values are "None", empty string (""), or a valid IP address. Setting
+                                    this to "None" makes a "headless service" (no virtual IP), which is
+                                    useful when direct endpoint connections are preferred and proxying is
+                                    not required. Only applies to types ClusterIP, NodePort, and
+                                    LoadBalancer. If this field is specified when creating a Service of type
+                                    ExternalName, creation will fail. This field will be wiped when updating
+                                    a Service to type ExternalName. If this field is not specified, it will
+                                    be initialized from the clusterIP field. If this field is specified,
+                                    clients must ensure that clusterIPs[0] and clusterIP have the same
+                                    value.
+
+                                    This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                                    These IPs must correspond to the values of the ipFamilies field. Both
+                                    clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                externalIPs:
+                                  description: |-
+                                    externalIPs is a list of IP addresses for which nodes in the cluster
+                                    will also accept traffic for this service. These IPs are not managed by
+                                    Kubernetes. The user is responsible for ensuring that traffic arrives
+                                    at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+                                    This field will be wiped when updating a Service to type ExternalName.
+
+                                    This field may hold a maximum of two entries (dual-stack families, in
+                                    either order). These families must correspond to the values of the
+                                    clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                    governed by the ipFamilyPolicy field.
+                                  items:
+                                    description: |-
+                                      IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                      to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                ipFamilyPolicy:
+                                  description: |-
+                                    IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                    this Service. If there is no value provided, then this field will be set
+                                    to SingleStack. Services can be "SingleStack" (a single IP family),
+                                    "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                    a single IP family on single-stack clusters), or "RequireDualStack"
+                                    (two IP families on dual-stack configured clusters, otherwise fail). The
+                                    ipFamilies and clusterIPs fields depend on the value of this field. This
+                                    field will be wiped when updating a service to type ExternalName.
+                                  type: string
+                                loadBalancerClass:
+                                  description: |-
+                                    loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                    If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                    e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                    This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                    balancer implementation is used, today this is typically done through the cloud provider integration,
+                                    but should apply for any default implementation. If set, it is assumed that a load balancer
+                                    implementation is watching for Services with a matching class. Any default load balancer
+                                    implementation (e.g. cloud providers) should ignore Services that set this field.
+                                    This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                    Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                  type: string
+                                loadBalancerIP:
+                                  description: |-
+                                    Only applies to Service Type: LoadBalancer.
+                                    This feature depends on whether the underlying cloud-provider supports specifying
+                                    the loadBalancerIP when a load balancer is created.
+                                    This field will be ignored if the cloud-provider does not support the feature.
+                                    Deprecated: This field was under-specified and its meaning varies across implementations.
+                                    Using it is non-portable and it may not support dual-stack.
+                                    Users are encouraged to use implementation-specific annotations when available.
+                                  type: string
+                                loadBalancerSourceRanges:
+                                  description: |-
+                                    If specified and supported by the platform, traffic through the cloud-provider
+                                    load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                                    cloud-provider does not support the feature.
+                                    More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                ports:
+                                  description: |-
+                                    The list of ports that are exposed by this service.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  items:
+                                    description: ServicePort contains information
+                                      on service's port.
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
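+          # Editor's example (illustrative only, not generated): an additional
+          # managed service exposing the read-write endpoint through a
+          # LoadBalancer, per the ManagedService schema above. The metadata
+          # name is invented.
+          #
+          #   spec:
+          #     managed:
+          #       services:
+          #         additional:
+          #         - selectorType: rw
+          #           serviceTemplate:
+          #             metadata:
+          #               name: cluster-example-rw-lb
+          #             spec:
+          #               type: LoadBalancer
+          # --- end example ---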
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
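+          # Editor's example (illustrative only, not generated): quorum
+          # settings via `minSyncReplicas`/`maxSyncReplicas`, plus a
+          # `PodMonitor` and a custom-queries ConfigMap, per the `monitoring`
+          # schema above. The ConfigMap name and key are invented.
+          #
+          #   spec:
+          #     minSyncReplicas: 1
+          #     maxSyncReplicas: 2
+          #     monitoring:
+          #       enablePodMonitor: true
+          #       customQueriesConfigMap:
+          #       - name: example-queries
+          #         key: queries.yaml
+          # --- end example ---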
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
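+          # Editor's example (illustrative only, not generated): dropping Go
+          # runtime metrics before ingestion with
+          # `podMonitorMetricRelabelings`, using the `drop` action described
+          # above.
+          #
+          #   spec:
+          #     monitoring:
+          #       enablePodMonitor: true
+          #       podMonitorMetricRelabelings:
+          #       - action: drop
+          #         sourceLabels: ["__name__"]
+          #         regex: "go_.*"
+          # --- end example ---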
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + extensions: + description: The configuration of the extensions to be added + items: + description: |- + ExtensionConfiguration is the configuration used to add + PostgreSQL extensions to the Cluster. + properties: + dynamic_library_path: + description: |- + The list of directories inside the image which should be added to dynamic_library_path. + If not defined, defaults to "/lib". + items: + type: string + type: array + extension_control_path: + description: |- + The list of directories inside the image which should be added to extension_control_path. + If not defined, defaults to "/share". + items: + type: string + type: array + image: + description: The image containing the extension, required + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. 
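+          # Editor's example (illustrative only, not generated): declaring a
+          # plugin as the WAL archiver, per the `plugins` schema above. The
+          # plugin name "barman-cloud.cloudnative-pg.io" and its parameter are
+          # assumptions based on the Barman Cloud plugin convention, not
+          # values taken from this schema.
+          #
+          #   spec:
+          #     plugins:
+          #     - name: barman-cloud.cloudnative-pg.io
+          #       isWALArchiver: true
+          #       parameters:
+          #         barmanObjectName: minio-store
+          # --- end example ---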
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + x-kubernetes-validations: + - message: An image reference is required + rule: has(self.reference) + ld_library_path: + description: The list of directories inside the image which + should be added to ld_library_path. + items: + type: string + type: array + name: + description: The name of the extension, required + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - image + - name + type: object + type: array + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 
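+          # Editor's example (illustrative only, not generated): loading an
+          # extension from an OCI image, per the `extensions` schema above.
+          # The extension name and image reference are invented placeholders.
+          #
+          #   spec:
+          #     postgresql:
+          #       extensions:
+          #       - name: vector
+          #         image:
+          #           reference: ghcr.io/example/pgvector-extension:1.0
+          # --- end example ---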
'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. + type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). 
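+          # Editor's example (illustrative only, not generated): quorum-based
+          # synchronous replication per the `synchronous` schema above, with
+          # strict data durability.
+          #
+          #   spec:
+          #     postgresql:
+          #       synchronous:
+          #         method: any
+          #         number: 1
+          #         dataDurability: required
+          # --- end example ---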
+ items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + isolationCheck: + description: |- + Configure the feature that extends the liveness probe for a primary + instance. In addition to the basic checks, this verifies whether the + primary is isolated from the Kubernetes API server and from its + replicas, ensuring that it can be safely shut down if network + partition or API unavailability is detected. Enabled by default. + properties: + connectionTimeout: + default: 1000 + description: Timeout in milliseconds for connections during + the primary isolation check + type: integer + enabled: + default: true + description: Whether primary isolation checking is enabled + for the liveness probe + type: boolean + requestTimeout: + default: 1000 + description: Timeout in milliseconds for requests during + the primary isolation check + type: integer + type: object + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
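+          # Editor's example (illustrative only, not generated): a rolling
+          # update policy where the primary is updated via switchover, and
+          # only on manual approval, per the two fields above.
+          #
+          #   spec:
+          #     primaryUpdateMethod: switchover
+          #     primaryUpdateStrategy: supervised
+          # --- end example ---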
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
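+          # Editor's example (illustrative only, not generated): tuning the
+          # primary isolation check of the liveness probe described above;
+          # the timeout values shown are the documented defaults.
+          #
+          #   spec:
+          #     probes:
+          #       liveness:
+          #         isolationCheck:
+          #           enabled: true
+          #           connectionTimeout: 1000
+          #           requestTimeout: 1000
+          # --- end example ---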
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
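+          # Editor's example (illustrative only, not generated): a readiness
+          # probe using the `streaming` strategy with a replication-lag
+          # ceiling, per the probe schema above; the 64Mi limit is an
+          # invented value.
+          #
+          #   spec:
+          #     probes:
+          #       readiness:
+          #         type: streaming
+          #         maximumLag: 64Mi
+          # --- end example ---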
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
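+ # A minimal sketch combining the projection sources described above,
+ # assuming this schema backs the cluster's `projectedVolumeTemplate` field.
+ # The ConfigMap/Secret names, audience, and paths are hypothetical:
+ #
+ #   projectedVolumeTemplate:
+ #     sources:
+ #     - configMap:
+ #         name: app-settings
+ #         items:
+ #         - key: app.properties
+ #           path: app.properties
+ #     - secret:
+ #         name: app-credentials
+ #         optional: true
+ #     - serviceAccountToken:
+ #         audience: https://vault.example.internal
+ #         expirationSeconds: 3600
+ #         path: token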
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ replica:
+ description: Replica cluster configuration
+ properties:
+ enabled:
+ description: |-
+ If replica mode is enabled, this cluster will be a replica of an
+ existing cluster. A replica cluster can be created from a recovery
+ object store or via streaming through pg_basebackup.
+ Refer to the Replica clusters page of the documentation for more information.
+ type: boolean
+ minApplyDelay:
+ description: |-
+ When replica mode is enabled, this parameter allows you to replay
+ transactions only when the system time is at least the configured
+ time past the commit time. This provides an opportunity to correct
+ data loss errors. Note that when this parameter is set, a promotion
+ token cannot be used.
+ type: string
+ primary:
+ description: |-
+ Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+ topology specified in externalClusters
+ type: string
+ promotionToken:
+ description: |-
+ A demotion token generated by an external cluster used to
+ check if the promotion requirements are met.
+ type: string
+ self:
+ description: |-
+ Self defines the name of this cluster. It is used to determine if this is a primary
+ or a replica cluster, comparing it with `primary`
+ type: string
+ source:
+ description: The name of the external cluster which is the replication
+ origin
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
+ replicationSlots:
+ default:
+ highAvailability:
+ enabled: true
+ description: Replication slots management configuration
+ properties:
+ highAvailability:
+ default:
+ enabled: true
+ description: Replication slots for high availability configuration
+ properties:
+ enabled:
+ default: true
+ description: |-
+ If enabled (default), the operator will automatically manage replication slots
+ on the primary instance and use them in streaming replication
+ connections with all the standby instances that are part of the HA
+ cluster. If disabled, the operator will not take advantage
+ of replication slots in streaming connections with the replicas.
+ This feature also controls replication slots in replica clusters,
+ from the designated primary to its cascading replicas.
+ type: boolean
+ slotPrefix:
+ default: _cnpg_
+ description: |-
+ Prefix for replication slots managed by the operator for HA.
+ It may only contain lower case letters, numbers, and the underscore character.
+ This can only be set at creation time. By default set to `_cnpg_`.
+ pattern: ^[0-9a-z_]*$
+ type: string
+ synchronizeLogicalDecoding:
+ description: |-
+ When enabled, the operator automatically manages synchronization of logical
+ decoding (replication) slots across high-availability clusters.
+ + Requires one of the following conditions: + - PostgreSQL version 17 or later + - PostgreSQL version < 17 with pg_failover_slots extension enabled + type: boolean + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. 
+ Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ superuserSecret:
+ description: |-
+ The secret containing the superuser password. If not defined, a new
+ secret will be created with a randomly generated password
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ switchoverDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a primary PostgreSQL instance
+ to gracefully shutdown during a switchover.
+ Default value is 3600 seconds (1 hour).
+ format: int32
+ type: integer
+ tablespaces:
+ description: The tablespaces configuration
+ items:
+ description: |-
+ TablespaceConfiguration is the configuration of a tablespace, and includes
+ the storage specification for the tablespace
+ properties:
+ name:
+ description: The name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ properties:
+ name:
+ type: string
+ type: object
+ storage:
+ description: The storage configuration for the tablespace
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent
+ Volume Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to
+ the PersistentVolume backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ temporary:
+ default: false
+ description: |-
+ When set to true, the tablespace will be added as a `temp_tablespaces`
+ entry in PostgreSQL, and will be available to automatically house temp
+ database objects, or other temporary files. Please refer to PostgreSQL
+ documentation for more information on the `temp_tablespaces` GUC.
+ type: boolean
+ required:
+ - name
+ - storage
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching
+ pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
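+ # A minimal sketch of the tablespaces configuration defined above; the
+ # tablespace names, owner, sizes, and storage class are hypothetical:
+ #
+ #   tablespaces:
+ #   - name: analytics
+ #     owner:
+ #       name: app
+ #     storage:
+ #       size: 10Gi
+ #   - name: scratch
+ #     temporary: true
+ #     storage:
+ #       size: 5Gi
+ #       storageClass: fast-local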
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. 
+ In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
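+ # A minimal sketch showing `walStorage` next to the main `storage` section,
+ # so WAL segments get a dedicated PVC as described above; the sizes and
+ # storage classes are hypothetical:
+ #
+ #   storage:
+ #     size: 50Gi
+ #     storageClass: standard
+ #   walStorage:
+ #     size: 10Gi
+ #     storageClass: fast-ssd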
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ required:
+ - instances
+ type: object
+ x-kubernetes-validations:
+ - message: imageName and imageCatalogRef are mutually exclusive
+ rule: '!(has(self.imageCatalogRef) && has(self.imageName))'
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ availableArchitectures:
+ description: AvailableArchitectures reports the available architectures
+ of a cluster
+ items:
+ description: AvailableArchitecture represents the state of a cluster's
+ architecture
+ properties:
+ goArch:
+ description: GoArch is the name of the executable architecture
+ type: string
+ hash:
+ description: Hash is the hash of the executable
+ type: string
+ required:
+ - goArch
+ - hash
+ type: object
+ type: array
+ certificates:
+ description: The configuration for the CA and related certificates,
+ initialized with defaults.
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate.
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
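+ # A hypothetical sketch of the client CA secret shape implied above, with
+ # the `ca.crt`/`ca.key` keys; the name and data values are placeholders:
+ #
+ #   apiVersion: v1
+ #   kind: Secret
+ #   metadata:
+ #     name: my-cluster-client-ca
+ #   data:
+ #     ca.crt: <base64-encoded CA certificate>
+ #     ca.key: <base64-encoded CA private key>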
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
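+ # For context, a hedged sketch of how these secrets are typically referenced
+ # on the spec side (assuming the `spec.certificates` stanza; the secret
+ # names are hypothetical):
+ #
+ #   certificates:
+ #     serverCASecret: my-server-ca
+ #     serverTLSSecret: my-server-tls
+ #     clientCASecret: my-client-ca
+ #     replicationTLSSecret: my-replication-tls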
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+ description: The commit hash of the operator binary that is currently
+ running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the binary of the operator
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod. + + Deprecated: the field is not set for backup plugins. + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: |- + The first recoverability point, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. + type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: |- + Last failed backup, stored as a date in RFC3339 format. + + Deprecated: the field is not set for backup plugins. + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format. + This field is calculated from the content of LastSuccessfulBackupByMethod. 
+
+ Deprecated: the field is not set for backup plugins.
+ type: string
+ lastSuccessfulBackupByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: |-
+ Last successful backup, stored as a date in RFC3339 format, per backup method type.
+
+ Deprecated: the field is not set for backup plugins.
+ type: object
+ latestGeneratedNode:
+ description: ID of the latest generated node (used to avoid node name
+ clashing)
+ type: integer
+ managedRolesStatus:
+ description: ManagedRolesStatus reports the state of the managed roles
+ in the cluster
+ properties:
+ byStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: ByStatus gives the list of roles in each state
+ type: object
+ cannotReconcile:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: |-
+ CannotReconcile lists roles that cannot be reconciled in PostgreSQL,
+ with an explanation of the cause
+ type: object
+ passwordStatus:
+ additionalProperties:
+ description: PasswordState represents the state of the password
+ of a managed RoleConfiguration
+ properties:
+ resourceVersion:
+ description: the resource version of the password secret
+ type: string
+ transactionID:
+ description: the last transaction ID to affect the role
+ definition in PostgreSQL
+ format: int64
+ type: integer
+ type: object
+ description: PasswordStatus gives the last transaction id and
+ password secret version for each managed role
+ type: object
+ type: object
+ onlineUpdateEnabled:
+ description: OnlineUpdateEnabled shows if the online upgrade is enabled
+ inside the cluster
+ type: boolean
+ pgDataImageInfo:
+ description: PGDataImageInfo contains the details of the latest image
+ that has run on the current data directory.
+ properties:
+ image:
+ description: Image is the image name
+ type: string
+ majorVersion:
+ description: MajorVersion is the major version of the image
+ type: integer
+ required:
+ - image
+ - majorVersion
+ type: object
+ phase:
+ description: Current phase of the cluster
+ type: string
+ phaseReason:
+ description: Reason for the current phase
+ type: string
+ pluginStatus:
+ description: PluginStatus is the status of the loaded plugins
+ items:
+ description: PluginStatus is the status of a loaded plugin
+ properties:
+ backupCapabilities:
+ description: |-
+ BackupCapabilities are the list of capabilities of the
+ plugin regarding the Backup management
+ items:
+ type: string
+ type: array
+ capabilities:
+ description: |-
+ Capabilities are the list of capabilities of the
+ plugin
+ items:
+ type: string
+ type: array
+ name:
+ description: Name is the name of the plugin
+ type: string
+ operatorCapabilities:
+ description: |-
+ OperatorCapabilities are the list of capabilities of the
+ plugin regarding the reconciler
+ items:
+ type: string
+ type: array
+ restoreJobHookCapabilities:
+ description: |-
+ RestoreJobHookCapabilities are the list of capabilities of the
+ plugin regarding the RestoreJobHook management
+ items:
+ type: string
+ type: array
+ status:
+ description: Status contains the status reported by the plugin
+ through the SetStatusInCluster interface
+ type: string
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+
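As a rough illustration, the status block defined above might be populated like this on a running Cluster (a sketch only: the instance name, condition type and reason, timestamp, and plugin name are invented; only the field names come from the schema):

    status:
      currentPrimary: cluster-example-1
      instances: 3
      conditions:
        - type: Ready                       # illustrative condition type
          status: "True"
          reason: ClusterIsReady            # CamelCase, matching the pattern above
          message: Cluster is Ready
          lastTransitionTime: "2025-01-01T00:00:00Z"
          observedGeneration: 12
      pluginStatus:
        - name: barman-cloud.cloudnative-pg.io   # hypothetical plugin name
          version: 0.1.0
          capabilities:
            - WAL
            - Backup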
poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. 
+ type: boolean
+ type: object
+ systemID:
+ description: SystemID is the latest detected PostgreSQL SystemID
+ type: string
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance; this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+ description: Current write pod
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.18.0
+ name: databases.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Database
+ listKind: DatabaseList
+ plural: databases
+ singular: database
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Database is the Schema for the databases API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. 
+ enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. 
+ type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
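Putting the Database spec above together, a minimal manifest could look like the following sketch (all names are invented; `ensure: present` and `databaseReclaimPolicy: retain` are the defaults):

    apiVersion: postgresql.cnpg.io/v1
    kind: Database
    metadata:
      name: app-database
    spec:
      cluster:
        name: cluster-example
      name: app                      # immutable; postgres, template0 and template1 are reserved
      owner: app
      encoding: UTF8                 # immutable once set
      extensions:
        - name: pg_stat_statements   # illustrative extension
          ensure: present
      schemas:
        - name: app_schema           # illustrative schema
          owner: app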
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ applied:
+ description: Applied is true if the database was reconciled correctly
+ type: boolean
+ extensions:
+ description: Extensions is the status of the managed extensions
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ schemas:
+ description: Schemas is the status of the managed schemas
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.18.0
+ name: failoverquorums.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: FailoverQuorum
+ listKind: FailoverQuorumList
+ plural: failoverquorums
+ singular: failoverquorum
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: |-
+ FailoverQuorum contains the information about the current failover
+ quorum status of a PG cluster. It is updated by the instance manager
+ of the primary node and reset to zero by the operator to trigger
+ an update.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ description: Most recently observed status of the failover quorum.
+ properties:
+ method:
+ description: Contains the latest reported Method value.
+ type: string
+ primary:
+ description: |-
+ Primary is the name of the primary instance that most recently
+ updated this object.
+ type: string
+ standbyNames:
+ description: |-
+ StandbyNames is the list of potentially synchronous
+ instance names.
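For orientation, the status of a FailoverQuorum object written by the primary's instance manager might read as follows (a sketch only: the instance names and method value are invented, the object name is assumed to track the Cluster name, and `standbyNumber` is the integer field defined just below):

    apiVersion: postgresql.cnpg.io/v1
    kind: FailoverQuorum
    metadata:
      name: cluster-example
    status:
      method: any                    # latest reported synchronous replication method
      primary: cluster-example-1
      standbyNames:
        - cluster-example-2
        - cluster-example-3
      standbyNumber: 1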
+ items: + type: string + type: array + standbyNumber: + description: |- + StandbyNumber is the number of synchronous standbys that transactions + need to wait for replies from. + type: integer + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. + minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
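Before the Pooler schema continues, a quick sketch of an ImageCatalog that satisfies the rules above (one to eight entries with unique major versions; the image tags are examples only):

    apiVersion: postgresql.cnpg.io/v1
    kind: ImageCatalog
    metadata:
      name: postgresql
    spec:
      images:
        - major: 16
          image: ghcr.io/cloudnative-pg/postgresql:16.6   # example tag
        - major: 17
          image: ghcr.io/cloudnative-pg/postgresql:17.2   # example tag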
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. 
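A minimal Pooler sketch tying together the cluster reference, replica count, and deployment strategy described above (names are invented; `type: rw` is assumed from the `.spec.type` printer column, and the `pgbouncer` block is detailed further down in this schema):

    apiVersion: postgresql.cnpg.io/v1
    kind: Pooler
    metadata:
      name: pooler-example-rw        # must not clash with any Cluster name in the namespace
    spec:
      cluster:
        name: cluster-example
      instances: 3
      type: rw                       # assumed value
      deploymentStrategy:
        type: RollingUpdate
        rollingUpdate:
          maxSurge: 1
          maxUnavailable: 0
      pgbouncer:
        poolMode: session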
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. 
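To make the RelabelConfig schema concrete, a monitoring fragment that drops a hypothetical noisy metric before ingestion might look like this (the metric name pattern is invented):

    monitoring:
      enablePodMonitor: true
      podMonitorMetricRelabelings:
        - action: drop
          sourceLabels: ["__name__"]     # label names must match ^[a-zA-Z_][a-zA-Z0-9_]*$
          regex: pgbouncer_noisy_.*      # hypothetical metric prefix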
+ type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. + type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. 
Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. 
If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. 
This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. 
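As a sketch of how a few of these Service fields combine in a Pooler's `serviceTemplate` (the label value is invented; `externalTrafficPolicy: Local` preserves client source IPs, as described above):

    serviceTemplate:
      metadata:
        labels:
          app: pooler-example-rw       # illustrative label
      spec:
        type: LoadBalancer
        externalTrafficPolicy: Local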
+ "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
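The nodeAffinity schema above is easier to read next to a concrete value. A minimal sketch, assuming a hypothetical example.com/cpu-generation node label; note how Gt takes a single value interpreted as an integer, how required terms are ORed, and how expressions inside a term are ANDed:

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:                     # terms are ORed
        - matchExpressions:                  # expressions are ANDed
            - key: kubernetes.io/arch
              operator: In
              values: ["amd64", "arm64"]
            - key: example.com/cpu-generation   # hypothetical node label
              operator: Gt
              values: ["3"]                  # single value, read as an integer
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50                           # range 1-100
        preference:
          matchExpressions:
            - key: topology.kubernetes.io/zone
              operator: In
              values: ["eu-west-1a"]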
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
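The matchLabelKeys / mismatchLabelKeys prose above is dense; the effect is easiest to see with the usual pod-template-hash example. A sketch under the assumption that the cluster enables the MatchLabelKeysInPodAffinity feature gate (beta in recent Kubernetes releases); labels are illustrative:

podAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    - topologyKey: kubernetes.io/hostname
      labelSelector:
        matchLabels:
          app: example                       # hypothetical label
      matchLabelKeys:
        - pod-template-hash                  # value is looked up on the incoming pod
# If the incoming pod carries pod-template-hash=abc123, the term effectively
# selects: app=example AND pod-template-hash in (abc123), i.e. only pods from
# the same ReplicaSet rollout count for co-location.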
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
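Pulling the remaining podAffinity fields together, a hedged sketch of a soft co-location preference, with namespaceSelector widening the search beyond the pod's own namespace (the app label and weight are illustrative):

affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100                              # range 1-100
        podAffinityTerm:
          topologyKey: topology.kubernetes.io/zone  # "co-located" = same zone
          labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values: ["cache"]                # hypothetical label
          namespaceSelector: {}                  # empty selector = all namespaces
          # namespaces: ["shared"]               # optional static list, unioned
          #                                      # with the selector above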
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
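podAntiAffinity is the mirror image and is the usual tool for spreading replicas of the same workload. A minimal sketch (the label is illustrative) that prefers one replica per node:

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname    # spread across nodes
          labelSelector:
            matchLabels:
              app: example                       # hypothetical label
# Switching to requiredDuringSchedulingIgnoredDuringExecution turns the spread
# into a hard constraint: pods that cannot satisfy it stay Pending.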
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
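Moving on to containers: the args/command/env expansion rules above ($(VAR) substitution, $$ escaping) in a minimal sketch with a hypothetical image and values:

containers:
  - name: app
    image: registry.example.com/app:1.0      # hypothetical image
    command: ["/bin/app"]                    # replaces the image ENTRYPOINT
    args: ["--listen", "$(PORT)"]            # expanded from the env below
    env:
      - name: PORT
        value: "8080"
      - name: LITERAL
        value: "$$(PORT)"                    # $$ escapes: stays "$(PORT)"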
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. 
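The valueFrom sources and envFrom described above, sketched with hypothetical ConfigMap/Secret names; note the documented precedence of plain env entries over envFrom on duplicate keys:

env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name             # downward API
  - name: DB_PASSWORD
    valueFrom:
      secretKeyRef:
        name: app-credentials                # hypothetical Secret
        key: password
        optional: false                      # fail if the key is missing
envFrom:
  - prefix: CFG_                             # resulting names must be C_IDENTIFIERs
    configMapRef:
      name: app-config                       # hypothetical ConfigMap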
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. 
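A hedged sketch of the two lifecycle hooks; the sleep handler assumes a Kubernetes release where PodLifecycleSleepAction is enabled, and the commands are illustrative:

lifecycle:
  postStart:
    exec:
      # exec'd directly, not via a shell, hence the explicit /bin/sh -c
      command: ["/bin/sh", "-c", "echo started >> /tmp/events"]
  preStop:
    sleep:
      seconds: 5                             # give endpoints time to drain
# The termination grace period starts counting before preStop runs, so keep
# hooks well under terminationGracePeriodSeconds.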
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
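Ports and probes in one sketch (paths, ports, and timings are illustrative); an httpGet probe may reference a named containerPort:

ports:
  - name: http                               # IANA_SVC_NAME, unique in the pod
    containerPort: 8080
    protocol: TCP
livenessProbe:
  httpGet:
    path: /healthz
    port: http                               # by name or by number
  initialDelaySeconds: 10
  periodSeconds: 10
  failureThreshold: 3                        # restart after ~30s of failures
readinessProbe:
  tcpSocket:
    port: 8080
  periodSeconds: 5
  successThreshold: 2                        # must be 1 only for liveness/startup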
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. 
+ Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. 
+ Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. 
+ properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
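+ # Editor's sketch of the exec action described above, e.g. checking
+ # PostgreSQL readiness (user name is hypothetical):
+ #   exec:
+ #     command: ["pg_isready", "-U", "postgres"]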
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
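+ # Illustrative only: a TCP check against the standard PostgreSQL port,
+ # using the int-or-string port field described above:
+ #   tcpSocket:
+ #     port: 5432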
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. 
+ Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
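+ # Editorial sketch (hypothetical volume name) of the volumeMounts shape
+ # documented above; note that subPath mounts are not allowed for
+ # ephemeral containers:
+ #   volumeMounts:
+ #     - name: pgdata
+ #       mountPath: /var/lib/postgresql/data
+ #       readOnly: true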
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
+ The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
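+ # Editor's illustration (hypothetical path and port) of a postStart
+ # handler using the httpGet form described above:
+ #   lifecycle:
+ #     postStart:
+ #       httpGet:
+ #         path: /healthz
+ #         port: 8080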
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
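+ # Illustrative only: the sleep action documented above gives load
+ # balancers time to drain connections before termination, e.g.:
+ #   preStop:
+ #     sleep:
+ #       seconds: 5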
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. 
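+ # Editor's sketch (hypothetical path and port) of an HTTP liveness probe
+ # combining the threshold fields described above:
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8000
+ #     periodSeconds: 10
+ #     failureThreshold: 3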
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
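+ # Editor's sketch (hypothetical names): declaring a named port and probing
+ # readiness against it; a failing readiness probe removes the pod from
+ # Service endpoints as described above:
+ #   ports:
+ #     - name: status
+ #       containerPort: 8000
+ #   readinessProbe:
+ #     httpGet:
+ #       path: /readyz
+ #       port: status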
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
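+ # Editor's illustration of an in-place resize policy entry as described
+ # above (requires the InPlacePodVerticalScaling feature gate):
+ #   resizePolicy:
+ #     - resourceName: cpu
+ #       restartPolicy: NotRequired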
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. 
Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
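+ # Illustrative only: opting into the container runtime's default seccomp
+ # profile, per the seccompProfile options described above:
+ #   seccompProfile:
+ #     type: RuntimeDefault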
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
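+ # Editorial sketch (hypothetical name, image, and port) of a "sidecar"
+ # init container as described above: restartPolicy Always keeps it running
+ # for the pod's lifetime, and its startupProbe gates the rest of the init
+ # sequence:
+ #   initContainers:
+ #     - name: log-shipper
+ #       image: example.com/log-shipper:latest
+ #       restartPolicy: Always
+ #       startupProbe:
+ #         tcpSocket:
+ #           port: 24224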
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
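+ # Hand-written illustrative note (not controller-gen output): a minimal sketch of how
+ # the startupProbe fields above combine, assuming a hypothetical container that serves
+ # /healthz on port 8080; failureThreshold 30 x periodSeconds 10 allows up to 300s to start.
+ #   startupProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     failureThreshold: 30
+ #     periodSeconds: 10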
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime.
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. 
+ + If the OS field is set to linux, the following fields must be unset: + - securityContext.windowsOptions + + If the OS field is set to windows, the following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional values may be defined in the future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name.
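+ # Hand-written illustrative note (not controller-gen output): the os and priority
+ # fields above in a pod spec; "db-critical" is a hypothetical PriorityClass that
+ # would have to exist in the cluster.
+ #   spec:
+ #     os:
+ #       name: linux
+ #     priorityClassName: db-critical
+ #     preemptionPolicy: PreemptLowerPriority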
+ If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
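+ # Hand-written illustrative note (not controller-gen output): with the alpha
+ # DynamicResourceAllocation feature gate enabled, a pod could reference a claim
+ # template as described above; "gpu" and "gpu-template" are hypothetical names.
+ #   resourceClaims:
+ #     - name: gpu
+ #       resourceClaimTemplateName: gpu-template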
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. 
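+ # Hand-written illustrative note (not controller-gen output): a scheduling gate as
+ # described above; the gate name is hypothetical, and the pod stays SchedulingGated
+ # until a controller removes it.
+ #   schedulingGates:
+ #     - name: example.com/provisioning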
See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside the Pod. This field will only apply to + volume types which support fsGroup based ownership (and permissions). + It will have no effect on ephemeral volume types such as secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux. + Valid values are "MountOption" and "Recursive".
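+ # Hand-written illustrative note (not controller-gen output): a pod-level
+ # securityContext combining the fields above; the numeric IDs are hypothetical.
+ #   securityContext:
+ #     runAsUser: 26
+ #     runAsGroup: 26
+ #     runAsNonRoot: true
+ #     fsGroup: 26
+ #     fsGroupChangePolicy: OnRootMismatch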
+ + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. 
+ If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). 
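+ # Hand-written illustrative note (not controller-gen output): supplementalGroups and
+ # sysctls as described above; the values are hypothetical, and the sysctl must be
+ # allowed by the kubelet/runtime configuration.
+ #   securityContext:
+ #     supplementalGroups: [3000]
+ #     sysctls:
+ #       - name: net.ipv4.tcp_keepalive_time
+ #         value: "600"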
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Defaults to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set, containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Defaults to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology.
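+ # Hand-written illustrative note (not controller-gen output): a toleration matching
+ # a hypothetical taint "dedicated=postgres:NoSchedule" placed on a set of nodes.
+ #   tolerations:
+ #     - key: dedicated
+ #       operator: Equal
+ #       value: postgres
+ #       effect: NoSchedule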
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys is equal to or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5 (MinDomains), so "global minimum" is treated as 0. + In this situation, a new pod with the same labelSelector cannot be scheduled, + because the computed skew would be 3 (3 - 0) if the new Pod were scheduled to any of the three zones, + which would violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put a balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology.
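+ # Hand-written illustrative note (not controller-gen output): a spread constraint
+ # using the fields above; the label selector value is hypothetical.
+ #   topologySpreadConstraints:
+ #     - maxSkew: 1
+ #       topologyKey: topology.kubernetes.io/zone
+ #       whenUnsatisfiable: DoNotSchedule
+ #       labelSelector:
+ #         matchLabels:
+ #           app: example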
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. 
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specifies whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version.
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. 
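+ # Hand-written illustrative note (not controller-gen output): an emptyDir and a
+ # downwardAPI volume combining the fields above; names and sizes are hypothetical.
+ #   volumes:
+ #     - name: scratch
+ #       emptyDir:
+ #         medium: Memory
+ #         sizeLimit: 256Mi
+ #     - name: podinfo
+ #       downwardAPI:
+ #         items:
+ #           - path: labels
+ #             fieldRef:
+ #               fieldPath: metadata.labels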
+ + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
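The `ephemeral.volumeClaimTemplate` machinery described above is easiest to see in a fragment for a pod template's `spec.volumes` list (names are hypothetical): a PVC named `<pod-name>-cache` is created before the pod starts, owned by the pod, and deleted with it.

```yaml
volumes:
  - name: cache
    ephemeral:
      volumeClaimTemplate:
        metadata:
          labels:
            app.kubernetes.io/component: cache   # copied onto the generated PVC
        spec:
          accessModes: ["ReadWriteOnce"]
          storageClassName: fast-ssd             # hypothetical StorageClass
          resources:
            requests:
              storage: 10Gi
```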
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. 
Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. 
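As a sketch of the `image` volume type described above (the reference and registry are made up), this fragment mounts an OCI artifact into a container; the kubelet presents the merged layers read-only and noexec, and a pinned tag with `IfNotPresent` avoids re-pulling on every pod start.

```yaml
volumes:
  - name: models
    image:
      reference: registry.example.com/ml/models:v2   # hypothetical OCI artifact
      pullPolicy: IfNotPresent
```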
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. 
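A fragment showing the two plain claim- and network-backed sources just described, with hypothetical claim and server names; the PersistentVolumeClaim must already exist in the pod's namespace.

```yaml
volumes:
  - name: data
    persistentVolumeClaim:
      claimName: app-data              # pre-existing PVC in the same namespace
  - name: shared
    nfs:
      server: nfs.example.internal     # hypothetical NFS server
      path: /exports/shared
      readOnly: true
```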
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
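A `projected` volume merges several of the sources above under a single mount point. A minimal sketch with hypothetical ConfigMap and Secret names:

```yaml
volumes:
  - name: app-config
    projected:
      defaultMode: 0440
      sources:
        - configMap:
            name: app-settings         # hypothetical ConfigMap
            items:
              - key: config.yaml
                path: config.yaml
        - secret:
            name: app-credentials      # hypothetical Secret
            optional: true
        - downwardAPI:
            items:
              - path: namespace
                fieldRef:
                  fieldPath: metadata.namespace
```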
+ format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. 
A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. 
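The `serviceAccountToken` projection described above issues a bound, auto-rotated token (the kubelet starts rotating once 80 percent of the TTL has elapsed). A minimal fragment, assuming a hypothetical audience:

```yaml
volumes:
  - name: bound-token
    projected:
      sources:
        - serviceAccountToken:
            audience: vault.example.com   # hypothetical; defaults to the apiserver
            expirationSeconds: 3600       # must be at least 600 (10 minutes)
            path: token
```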
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
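A fragment for the plain `secret` volume source described above, projecting only selected keys with tightened permissions (the Secret name is hypothetical):

```yaml
volumes:
  - name: server-tls
    secret:
      secretName: server-tls-certs     # hypothetical Secret
      defaultMode: 0400
      items:
        - key: tls.crt
          path: tls.crt
        - key: tls.key
          path: tls.key
```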
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
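Tying together the pod template, the `type` enum, and the two required fields (`cluster` and `pgbouncer`): a minimal Pooler manifest as a sketch. The cluster name and the pgbouncer settings are illustrative only; `rw` routes traffic to the primary, `ro` to replicas.

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw        # hypothetical
spec:
  cluster:
    name: cluster-example        # hypothetical Cluster in the same namespace
  instances: 3
  type: rw                       # one of rw, ro, r
  pgbouncer:
    poolMode: session
```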
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
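Putting the `target` rules above together: a sketch of a Publication that publishes one table (without its descendants) plus every table in one schema. All names are hypothetical; note that `allTables` and `objects` are mutually exclusive, as are `table` and `tablesInSchema` within a single object.

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Publication
metadata:
  name: publication-example          # hypothetical
spec:
  cluster:
    name: cluster-example            # the "publisher" cluster
  dbname: app                        # immutable once set
  name: pub_app                      # immutable once set
  publicationReclaimPolicy: delete   # drop the publication with this resource
  target:
    objects:
      - table:
          schema: public
          name: orders
          only: true                 # do not include descendant tables
      - tablesInSchema: analytics
```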
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
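+ # Illustrative sketch (not part of the generated CRD): a minimal ScheduledBackup
+ # exercising the fields documented in this schema. All names here are hypothetical.
+ # Note the six-field cron expression with the leading seconds specifier described
+ # under `schedule` further down; "0 0 2 * * *" fires daily at 02:00.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ScheduledBackup
+ #   metadata:
+ #     name: daily-backup          # hypothetical name
+ #   spec:
+ #     schedule: "0 0 2 * * *"     # sec min hour day-of-month month day-of-week
+ #     backupOwnerReference: self  # the ScheduledBackup owns the created Backups
+ #     immediate: true             # take the first backup right after creation
+ #     cluster:
+ #       name: cluster-example     # hypothetical target cluster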
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to back up + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to start immediately after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used; possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`). + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots. + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: Whether this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups always run on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup.
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule was checked + format: date-time + type: string + lastScheduleTime: + description: The last time that a backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: The next time a backup will run + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the subscription will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters included in the `WITH` clause of the PostgreSQL + `CREATE SUBSCRIPTION` command.
Most parameters cannot be changed + after the subscription is created and will be ignored if modified + later, except for a limited set documented at: + https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - failoverquorums/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - failoverquorums + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- 
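+ # A hedged sketch (not part of this manifest set): the publication/subscription
+ # editor and viewer ClusterRoles above ship unbound. One plausible way to grant
+ # one of them, assuming a hypothetical "dba" ServiceAccount in "app-namespace",
+ # is a namespaced RoleBinding that references the ClusterRole:
+ #
+ #   apiVersion: rbac.authorization.k8s.io/v1
+ #   kind: RoleBinding
+ #   metadata:
+ #     name: dba-publication-editor   # hypothetical
+ #     namespace: app-namespace       # hypothetical
+ #   roleRef:
+ #     apiGroup: rbac.authorization.k8s.io
+ #     kind: ClusterRole
+ #     name: cnpg-publication-editor-role
+ #   subjects:
+ #   - kind: ServiceAccount
+ #     name: dba                      # hypothetical
+ #     namespace: app-namespace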
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on 
epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT 
checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: 
"COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , 
COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - 
--max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: 
/validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None diff --git a/releases/operator-manifests.go b/releases/operator-manifests.go index 5f7e1e90ac..eb2f7e25c6 100644 --- a/releases/operator-manifests.go +++ b/releases/operator-manifests.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package releases contains the filesystem with operator manifests with all the diff --git a/tests/e2e/affinity_test.go b/tests/e2e/affinity_test.go index 3a74391ac2..e5e2de357a 100644 --- a/tests/e2e/affinity_test.go +++ b/tests/e2e/affinity_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -20,7 +23,7 @@ import ( "fmt" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . 
"github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -44,13 +47,13 @@ var _ = Describe("E2E Affinity", Serial, Label(tests.LabelPodScheduling), func() }) It("can create a cluster and a pooler with required affinity", func() { - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterFile, env) createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerFile, 3) - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) Expect(err).ToNot(HaveOccurred()) AssertClusterIsReady(namespace, clusterName, 300, env) }) diff --git a/tests/e2e/apparmor_test.go b/tests/e2e/apparmor_test.go index 70ecabeeff..8d75e5b932 100644 --- a/tests/e2e/apparmor_test.go +++ b/tests/e2e/apparmor_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -22,6 +25,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -48,16 +52,16 @@ var _ = Describe("AppArmor support", Serial, Label(tests.LabelNoOpenshift, tests }) It("sets up a cluster enabling AppArmor annotation feature", func() { - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterAppArmorFile, env) By("verifying AppArmor annotations on cluster and pods", func() { // Gathers the pod list using annotations - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) for _, pod := range podList.Items { - annotation := pod.ObjectMeta.Annotations[pkgutils.AppArmorAnnotationPrefix+"/"+specs.PostgresContainerName] + annotation := pod.Annotations[pkgutils.AppArmorAnnotationPrefix+"/"+specs.PostgresContainerName] Expect(annotation).ShouldNot(BeEmpty(), fmt.Sprintf("annotation for apparmor is not on pod %v", specs.PostgresContainerName)) Expect(annotation).Should(BeEquivalentTo("runtime/default"), diff --git a/tests/e2e/architecture_test.go b/tests/e2e/architecture_test.go index 34222b58cb..f159e8fe44 100644 --- a/tests/e2e/architecture_test.go +++ b/tests/e2e/architecture_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -19,7 +22,9 @@ package e2e import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -74,21 +79,21 @@ var _ = Describe("Available Architectures", Label(tests.LabelBasic), func() { var err error It("manages each available architecture", func() { - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(clusterManifest) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) // Fetch the operator's available architectures - operatorPod, err := env.GetOperatorPod() + operatorPod, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) - imageArchitectures, err := utils.GetOperatorArchitectures(&operatorPod) + imageArchitectures, err := operator.Architectures(&operatorPod) Expect(err).ToNot(HaveOccurred()) // Fetch the Cluster status - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) archStatus := cluster.Status.AvailableArchitectures diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index aaf97e0e68..9ba006e12b 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( + "database/sql" "errors" "fmt" "os" @@ -31,27 +35,50 @@ import ( "github.com/thoas/go-funk" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + discoveryv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" "k8s.io/utils/strings/slices" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + webhookv1 "github.com/cloudnative-pg/cloudnative-pg/internal/webhook/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/envsubst" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" + objectsutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/replicationslot" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) -func AssertSwitchover(namespace string, clusterName string, env *testsUtils.TestingEnvironment) { +func AssertSwitchover(namespace string, clusterName string, env *environment.TestingEnvironment) { AssertSwitchoverWithHistory(namespace, clusterName, false, env) } -func AssertSwitchoverOnReplica(namespace string, clusterName string, env *testsUtils.TestingEnvironment) { +func AssertSwitchoverOnReplica(namespace string, clusterName string, env *environment.TestingEnvironment) { AssertSwitchoverWithHistory(namespace, clusterName, true, env) } @@ -63,7 +90,7 @@ func AssertSwitchoverWithHistory( namespace string, clusterName string, isReplica bool, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { var pods []string var oldPrimary, targetPrimary string @@ -75,7 +102,7 @@ func AssertSwitchoverWithHistory( Eventually(func(g Gomega) { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) g.Expect(cluster.Status.CurrentPrimary, err).To( BeEquivalentTo(cluster.Status.TargetPrimary), @@ -85,7 +112,7 @@ func AssertSwitchoverWithHistory( oldPrimary = cluster.Status.CurrentPrimary // Gather pod names - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPodListLength = len(podList.Items) for _, p := range podList.Items { @@ -100,7 +127,7 @@ func AssertSwitchoverWithHistory( By(fmt.Sprintf("setting the TargetPrimary node to trigger a switchover to %s", targetPrimary), func() { err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Status.TargetPrimary = targetPrimary return env.Client.Status().Update(env.Ctx, cluster) @@ -109,11 +136,11 @@ func AssertSwitchoverWithHistory( }) By("waiting that the TargetPrimary become also CurrentPrimary", func() { - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.CurrentPrimary, err - }, testTimeouts[testsUtils.NewPrimaryAfterSwitchover]).Should(BeEquivalentTo(targetPrimary)) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.CurrentPrimary).To(BeEquivalentTo(targetPrimary)) + }, testTimeouts[timeouts.NewPrimaryAfterSwitchover]).Should(Succeed()) }) By("waiting that the old primary become ready", func() { @@ -145,7 +172,7 @@ func AssertSwitchoverWithHistory( // After we finish the switchover, we should wait for the cluster to be ready // otherwise, anyone executing this may not wait and also, the following part of the function // may fail because the switchover hasn't properly finish yet. 
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) if !isReplica { By("confirming that the all postgres containers have *.history file after switchover", func() { @@ -153,7 +180,7 @@ func AssertSwitchoverWithHistory( timeout := 120 // Gather pod names - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items), err).To(BeEquivalentTo(oldPodListLength)) for _, p := range podList.Items { pods = append(pods, p.Name) @@ -162,8 +189,9 @@ func AssertSwitchoverWithHistory( Eventually(func() error { count := 0 for _, pod := range pods { - out, _, err := env.ExecCommandInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: pod, }, nil, "sh", "-c", "ls $PGDATA/pg_wal/*.history") @@ -172,7 +200,8 @@ func AssertSwitchoverWithHistory( } numHistory := len(strings.Split(strings.TrimSpace(out), "\n")) - GinkgoWriter.Printf("count %d: pod: %s, the number of history file in pg_wal: %d\n", count, pod, numHistory) + GinkgoWriter.Printf("count %d: pod: %s, the number of history file in pg_wal: %d\n", count, pod, + numHistory) count++ if numHistory > 0 { continue @@ -194,7 +223,7 @@ func AssertCreateCluster( namespace string, clusterName string, sampleFile string, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { By(fmt.Sprintf("having a %v namespace", namespace), func() { // Creating a namespace should be quick @@ -206,19 +235,19 @@ func AssertCreateCluster( namespaceResource := &corev1.Namespace{} err := env.Client.Get(env.Ctx, namespacedName, namespaceResource) return namespaceResource.GetName(), err - }, testTimeouts[testsUtils.NamespaceCreation]).Should(BeEquivalentTo(namespace)) + }, testTimeouts[timeouts.NamespaceCreation]).Should(BeEquivalentTo(namespace)) }) By(fmt.Sprintf("creating a Cluster in the %v namespace", namespace), func() { CreateResourceFromFile(namespace, sampleFile) }) // Setting up a cluster with three pods is slow, usually 200-600s - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) } // AssertClusterIsReady checks the cluster has as many pods as in spec, that // none of them are going to be deleted, and that the status is Healthy -func AssertClusterIsReady(namespace string, clusterName string, timeout int, env *testsUtils.TestingEnvironment) { +func AssertClusterIsReady(namespace string, clusterName string, timeout int, env *environment.TestingEnvironment) { By(fmt.Sprintf("having a Cluster %s with each instance in status ready", clusterName), func() { // Eventually the number of ready instances should be equal to the // amount of instances defined in the cluster and @@ -227,13 +256,13 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env Eventually(func(g Gomega) { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) start := time.Now() Eventually(func() (string, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := 
clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } @@ -243,7 +272,7 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env return fmt.Sprintf("Pod '%s' is waiting for deletion", pod.Name), nil } } - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.Phase, err } return fmt.Sprintf("Ready pod is not as expected. Spec Instances: %d, ready pods: %d \n", @@ -251,19 +280,19 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env utils.CountReadyPods(podList.Items)), nil }, timeout, 2).Should(BeEquivalentTo(apiv1.PhaseHealthy), func() string { - cluster := testsUtils.PrintClusterResources(namespace, clusterName, env) - nodes, _ := env.DescribeKubernetesNodes() + cluster := testsUtils.PrintClusterResources(env.Ctx, env.Client, namespace, clusterName) + kubeNodes, _ := nodes.DescribeKubernetesNodes(env.Ctx, env.Client) return fmt.Sprintf("CLUSTER STATE\n%s\n\nK8S NODES\n%s", - cluster, nodes) + cluster, kubeNodes) }, ) if cluster.Spec.Instances != 1 { Eventually(func(g Gomega) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred(), "cannot get cluster pod list") - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred(), "cannot find cluster primary pod") replicaNamesList := make([]string, 0, len(podList.Items)-1) @@ -273,13 +302,14 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env } } replicaNamesString := strings.Join(replicaNamesList, ",") - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, }, "postgres", - fmt.Sprintf("SELECT COUNT(*) FROM pg_stat_replication WHERE application_name IN (%s)", + fmt.Sprintf("SELECT COUNT(*) FROM pg_catalog.pg_stat_replication WHERE application_name IN (%s)", replicaNamesString), ) g.Expect(err).ToNot(HaveOccurred(), "cannot extract the list of streaming replicas") @@ -290,8 +320,10 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env }) } -func AssertClusterDefault(namespace string, clusterName string, - isExpectedToDefault bool, env *testsUtils.TestingEnvironment, +func AssertClusterDefault( + namespace string, + clusterName string, + env *environment.TestingEnvironment, ) { By("having a Cluster object populated with default values", func() { // Eventually the number of ready instances should be equal to the @@ -300,43 +332,48 @@ func AssertClusterDefault(namespace string, clusterName string, var cluster *apiv1.Cluster Eventually(func(g Gomega) { var err error - cluster, err = env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) - validationErr := cluster.Validate() - if isExpectedToDefault { - Expect(validationErr).Should(BeEmpty(), validationErr) - } else { - Expect(validationErr).ShouldNot(BeEmpty(), validationErr) - } + validator := webhookv1.ClusterCustomValidator{} + validationWarn, 
validationErr := validator.ValidateCreate(env.Ctx, cluster)
+ Expect(validationWarn).To(BeEmpty())
+ Expect(validationErr).ToNot(HaveOccurred())
 })
}

-func AssertWebhookEnabled(env *testsUtils.TestingEnvironment, mutating, validating string) {
+func AssertWebhookEnabled(env *environment.TestingEnvironment, mutating, validating string) {
 By("re-setting namespace selector for all admission controllers", func() {
 // Setting the namespace selector in MutatingWebhook and ValidatingWebhook
 // to nil will go back to the default behaviour
- mWhc, position, err := testsUtils.GetCNPGsMutatingWebhookByName(env, mutating)
+ mWhc, position, err := operator.GetMutatingWebhookByName(env.Ctx, env.Client, mutating)
 Expect(err).ToNot(HaveOccurred())
 mWhc.Webhooks[position].NamespaceSelector = nil
- err = testsUtils.UpdateCNPGsMutatingWebhookConf(env, mWhc)
+ err = operator.UpdateMutatingWebhookConf(env.Ctx, env.Interface, mWhc)
 Expect(err).ToNot(HaveOccurred())

- vWhc, position, err := testsUtils.GetCNPGsValidatingWebhookByName(env, validating)
+ vWhc, position, err := operator.GetValidatingWebhookByName(env.Ctx, env.Client, validating)
 Expect(err).ToNot(HaveOccurred())
 vWhc.Webhooks[position].NamespaceSelector = nil
- err = testsUtils.UpdateCNPGsValidatingWebhookConf(env, vWhc)
+ err = operator.UpdateValidatingWebhookConf(env.Ctx, env.Interface, vWhc)
 Expect(err).ToNot(HaveOccurred())
 })
}

// Update the secrets and verify the cluster references the updated resource version of the secrets
-func AssertUpdateSecret(field string, value string, secretName string, namespace string,
- clusterName string, timeout int, env *testsUtils.TestingEnvironment,
+func AssertUpdateSecret(
+ field string,
+ value string,
+ secretName string,
+ namespace string,
+ clusterName string,
+ timeout int,
+ env *environment.TestingEnvironment,
) {
 var secret corev1.Secret
+
+ // Gather the secret
 Eventually(func(g Gomega) {
 err := env.Client.Get(env.Ctx,
 ctrlclient.ObjectKey{Namespace: namespace, Name: secretName},
@@ -344,28 +381,39 @@ func AssertUpdateSecret(field string, value string, secretName string, namespace
 g.Expect(err).ToNot(HaveOccurred())
 }).Should(Succeed())

+ // Change the given field to the new value provided
 secret.Data[field] = []byte(value)
 err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
 return env.Client.Update(env.Ctx, &secret)
 })
 Expect(err).ToNot(HaveOccurred())

- // Wait for the cluster pickup the updated secrets version first
+ // Wait for the cluster to pick up the updated secrets version first
 Eventually(func() string {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
 if err != nil {
 GinkgoWriter.Printf("Error reported while retrieving cluster %v\n", err.Error())
 return ""
 }
 switch {
 case strings.HasSuffix(secretName, apiv1.ApplicationUserSecretSuffix):
- GinkgoWriter.Printf("Resource version of Application secret referenced in the cluster is %v\n",
+ GinkgoWriter.Printf("Resource version of %s secret referenced in the cluster is %v\n",
+ secretName,
 cluster.Status.SecretsResourceVersion.ApplicationSecretVersion)
 return cluster.Status.SecretsResourceVersion.ApplicationSecretVersion
+
 case strings.HasSuffix(secretName, apiv1.SuperUserSecretSuffix):
- GinkgoWriter.Printf("Resource version of Superuser secret referenced in the cluster is %v\n",
+ GinkgoWriter.Printf("Resource version of %s secret referenced in the cluster is %v\n",
+ secretName,
 cluster.Status.SecretsResourceVersion.SuperuserSecretVersion)
 return
cluster.Status.SecretsResourceVersion.SuperuserSecretVersion
+
+ case cluster.UsesSecretInManagedRoles(secretName):
+ GinkgoWriter.Printf("Resource version of %s ManagedRole secret referenced in the cluster is %v\n",
+ secretName,
+ cluster.Status.SecretsResourceVersion.ManagedRoleSecretVersions[secretName])
+ return cluster.Status.SecretsResourceVersion.ManagedRoleSecretVersions[secretName]
+
 default:
 GinkgoWriter.Printf("Unsupported secret name found %v\n", secretName)
 return ""
@@ -373,330 +421,214 @@
 }, timeout).Should(BeEquivalentTo(secret.ResourceVersion))
}

-// AssertConnection is used if a connection from a pod to a postgresql
-// database works
-func AssertConnection(host string, user string, dbname string,
- password string, queryingPod corev1.Pod, timeout int, env *testsUtils.TestingEnvironment,
+// AssertConnection verifies that a connection to a postgresql database through the given service works
+func AssertConnection(
+ namespace string,
+ service string,
+ dbname string,
+ user string,
+ password string,
+ env *environment.TestingEnvironment,
) {
- By(fmt.Sprintf("connecting to the %v service as %v", host, user), func() {
- Eventually(func() string {
- dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require", host, user, dbname, password)
- commandTimeout := time.Second * 10
- stdout, _, err := env.ExecCommand(env.Ctx, queryingPod, specs.PostgresContainerName, &commandTimeout,
- "psql", dsn, "-tAc", "SELECT 1")
- if err != nil {
- return ""
- }
- return stdout
- }, timeout).Should(Equal("1\n"))
- })
-}
-
-// AssertOperatorIsReady verifies that the operator is ready
-func AssertOperatorIsReady() {
- Eventually(func() (bool, error) {
- ready, err := env.IsOperatorReady()
- if ready && err == nil {
- return true, nil
- }
- // Waiting a bit to avoid overloading the API server
- time.Sleep(1 * time.Second)
- return ready, err
- }, testTimeouts[testsUtils.OperatorIsReady]).Should(BeTrue(), "Operator pod is not ready")
-}
-
-// AssertDatabaseIsReady checks the database on the primary is ready to run queries
-//
-// NOTE: even if we checked AssertClusterIsReady, a temporary DB connectivity issue would take
-// failureThreshold x periodSeconds to be detected
-func AssertDatabaseIsReady(namespace, clusterName, dbName string) {
- By(fmt.Sprintf("checking the database on %s is ready", clusterName), func() {
+ By(fmt.Sprintf("connecting to the %v service as %v", service, user), func() {
 Eventually(func(g Gomega) {
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ namespace, service, dbname, user, password,
+ )
+ defer func() {
+ _ = conn.Close()
+ forwardConn.Close()
+ }()
 g.Expect(err).ToNot(HaveOccurred())
- stdout, stderr, err := env.ExecCommandInInstancePod(testsUtils.PodLocator{
- Namespace: namespace,
- PodName: primary.GetName(),
- }, nil, "pg_isready")
- g.Expect(err).ShouldNot(HaveOccurred())
- g.Expect(stderr).To(BeEmpty(), "while checking pg_isready")
- g.Expect(stdout).To(ContainSubstring("accepting"), "while checking pg_isready: Not accepting connections")
- _, _, err = env.ExecQueryInInstancePod(testsUtils.PodLocator{
- Namespace: namespace,
- PodName: primary.GetName(),
- }, testsUtils.DatabaseName(dbName), "select 1")
- g.Expect(err).ShouldNot(HaveOccurred())
- }, RetryTimeout, PollingTime).Should(Succeed())
+
+ var rawValue string
+ row := conn.QueryRow("SELECT
1") + err = row.Scan(&rawValue) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.TrimSpace(rawValue)).To(BeEquivalentTo("1")) + }, RetryTimeout).Should(Succeed()) }) } -// AssertCreateTestData create test on the "app" database -func AssertCreateTestData(namespace, clusterName, tableName string, pod *corev1.Pod) { - AssertDatabaseIsReady(namespace, clusterName, testsUtils.AppDBName) - By(fmt.Sprintf("creating test data in cluster %v", clusterName), func() { - query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName) - Eventually(func() error { - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) - if err != nil { - return err - } - return nil - }, RetryTimeout, PollingTime).Should(BeNil()) - }) +type TableLocator struct { + Namespace string + ClusterName string + DatabaseName string + TableName string + Tablespace string } -// AssertCreateTestDataWithDatabaseName create test data in a given database. -func AssertCreateTestDataWithDatabaseName( - namespace, - clusterName, - databaseName, - tableName string, - pod *corev1.Pod, -) { - By(fmt.Sprintf("creating test data in cluster %v", clusterName), func() { - query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName) - Eventually(func() error { - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - databaseName, - query, - ) - if err != nil { - return err - } - return nil - }, RetryTimeout, PollingTime).Should(BeNil()) - }) -} +// AssertCreateTestData create test data on a given TableLocator +func AssertCreateTestData(env *environment.TestingEnvironment, tl TableLocator) { + if tl.DatabaseName == "" { + tl.DatabaseName = postgres.AppDBName + } + if tl.Tablespace == "" { + tl.Tablespace = postgres.TablespaceDefaultName + } -type TableLocator struct { - Namespace string - ClusterName string - TableName string - Tablespace string -} + By(fmt.Sprintf("creating test data in table %v (cluster %v, database %v, tablespace %v)", + tl.TableName, tl.ClusterName, tl.DatabaseName, tl.Tablespace), func() { + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + tl.Namespace, + tl.ClusterName, + tl.DatabaseName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) -// AssertCreateTestDataInTablespace create test data. 
-func AssertCreateTestDataInTablespace(tl TableLocator, pod *corev1.Pod) { - AssertDatabaseIsReady(tl.Namespace, tl.ClusterName, testsUtils.AppDBName) - By(fmt.Sprintf("creating test data in tablespace %q", tl.Tablespace), func() { query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v TABLESPACE %v AS VALUES (1),(2);", tl.TableName, tl.Tablespace) - Eventually(func() error { - _, _, err := env.ExecCommandWithPsqlClient( - tl.Namespace, - tl.ClusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) - if err != nil { - return err - } - return nil - }, RetryTimeout, PollingTime).Should(BeNil()) + + _, err = conn.Exec(query) + Expect(err).ToNot(HaveOccurred()) }) } // AssertCreateTestDataLargeObject create large objects with oid and data -func AssertCreateTestDataLargeObject(namespace, clusterName string, oid int, data string, pod *corev1.Pod) { +func AssertCreateTestDataLargeObject(namespace, clusterName string, oid int, data string) { By("creating large object", func() { query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS image (name text,raster oid); "+ "INSERT INTO image (name, raster) VALUES ('beautiful image', lo_from_bytea(%d, '%s'));", oid, data) - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.RunQueryFromPod( - pod, - host, - testsUtils.AppDBName, - appUser, - appUserPass, - query, - env) + + _, err := postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) } -// insertRecordIntoTableWithDatabaseName insert an entry into a table -func insertRecordIntoTableWithDatabaseName( - namespace, - clusterName, - databaseName, - tableName string, - value int, - pod *corev1.Pod, -) { - query := fmt.Sprintf("INSERT INTO %v VALUES (%v);", tableName, value) - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.RunQueryFromPod( - pod, - host, - databaseName, - appUser, - appUserPass, - query, - env) +// insertRecordIntoTable insert an entry into a table +func insertRecordIntoTable(tableName string, value int, conn *sql.DB) { + _, err := conn.Exec(fmt.Sprintf("INSERT INTO %s VALUES (%d)", tableName, value)) Expect(err).ToNot(HaveOccurred()) } -// insertRecordIntoTable insert an entry into a table -func insertRecordIntoTable(namespace, clusterName, tableName string, value int, pod *corev1.Pod) { - query := fmt.Sprintf("INSERT INTO %v VALUES (%v);", tableName, value) - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) - Expect(err).NotTo(HaveOccurred()) +func QueryMatchExpectationPredicate( + pod *corev1.Pod, + dbname exec.DatabaseName, + query string, + expectedOutput string, +) func(g Gomega) { + return func(g Gomega) { + // executor + stdout, stderr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: pod.Namespace, PodName: pod.Name}, + dbname, + query, + ) + if err != nil { + 
GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr) + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo(expectedOutput), + fmt.Sprintf("expected query %q to return %q (in database %q)", query, expectedOutput, dbname)) + } } -// AssertDatabaseExists assert if database exists -func AssertDatabaseExists(namespace, podName, databaseName string, expectedValue bool) { - By(fmt.Sprintf("verifying if database %v exists", databaseName), func() { - pod := &corev1.Pod{} - commandTimeout := time.Second * 10 - query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_database WHERE lower(datname) = lower('%v'));", databaseName) - err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, pod) - Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "postgres", "-tAc", query) - Expect(err).ToNot(HaveOccurred()) - if expectedValue { - Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t")) - } else { - Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f")) - } - }) +func roleExistsQuery(roleName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_roles WHERE rolname='%v')", roleName) } -// AssertUserExists assert if user exists -func AssertUserExists(namespace, podName, userName string, expectedValue bool) { - By(fmt.Sprintf("verifying if user %v exists", userName), func() { - pod := &corev1.Pod{} - commandTimeout := time.Second * 10 - query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_user WHERE lower(usename) = lower('%v'));", userName) - err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, pod) - Expect(err).ToNot(HaveOccurred()) - stdout, stderr, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "postgres", "-tAc", query) - if err != nil { - GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr) - } - Expect(err).ToNot(HaveOccurred()) - if expectedValue { - Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t")) - } else { - Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f")) - } - }) +func databaseExistsQuery(dbName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_database WHERE datname='%v')", dbName) } -// AssertDataExpectedCountWithDatabaseName verifies that an expected amount of rows exists on the table -func AssertDataExpectedCountWithDatabaseName(namespace, podName, databaseName string, - tableName string, expectedValue int, -) { - By(fmt.Sprintf("verifying test data on pod %v", podName), func() { - query := fmt.Sprintf("select count(*) from %v", tableName) - commandTimeout := time.Second * 10 +func extensionExistsQuery(extName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT FROM pg_catalog.pg_extension WHERE extname='%v')", extName) +} - Eventually(func() (int, error) { - // We keep getting the pod, since there could be a new pod with the same name - pod := &corev1.Pod{} - err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, pod) - if err != nil { - return 0, err - } - stdout, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", databaseName, "-tAc", query) - if err != nil { - return 0, err - } - nRows, err := strconv.Atoi(strings.Trim(stdout, "\n")) - return nRows, err - }, 300).Should(BeEquivalentTo(expectedValue)) 
- })
+func schemaExistsQuery(namespaceName string) string {
+ return fmt.Sprintf("SELECT EXISTS(SELECT FROM pg_catalog.pg_namespace WHERE nspname='%v')", namespaceName)
+}
+
+func fdwExistsQuery(fdwName string) string {
+ return fmt.Sprintf("SELECT EXISTS(SELECT FROM pg_catalog.pg_foreign_data_wrapper WHERE fdwname='%v')", fdwName)
}

// AssertDataExpectedCount verifies that an expected number of rows exists in the table
-func AssertDataExpectedCount(namespace, clusterName, tableName string, expectedValue int, pod *corev1.Pod) {
- By(fmt.Sprintf("verifying test data in table %v", tableName), func() {
- query := fmt.Sprintf("select count(*) from %v", tableName)
- Eventually(func() (int, error) {
- stdout, _, err := env.ExecCommandWithPsqlClient(
- namespace,
- clusterName,
- pod,
- apiv1.ApplicationUserSecretSuffix,
- testsUtils.AppDBName,
- query)
- if err != nil {
- return 0, err
- }
- nRows, err := strconv.Atoi(strings.Trim(stdout, "\n"))
- return nRows, err
- }, 300).Should(BeEquivalentTo(expectedValue))
+func AssertDataExpectedCount(
+ env *environment.TestingEnvironment,
+ tl TableLocator,
+ expectedValue int,
+) {
+ By(fmt.Sprintf("verifying test data in table %v (cluster %v, database %v, tablespace %v)",
+ tl.TableName, tl.ClusterName, tl.DatabaseName, tl.Tablespace), func() {
+ row, err := postgres.RunQueryRowOverForward(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
+ tl.Namespace,
+ tl.ClusterName,
+ tl.DatabaseName,
+ apiv1.ApplicationUserSecretSuffix,
+ fmt.Sprintf("SELECT COUNT(*) FROM %s", tl.TableName),
+ )
+ Expect(err).ToNot(HaveOccurred())
+
+ var nRows int
+ err = row.Scan(&nRows)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(nRows).Should(BeEquivalentTo(expectedValue))
 })
}

// AssertLargeObjectValue verifies the presence of a Large Object given by its OID and data
-func AssertLargeObjectValue(namespace, clusterName string, oid int, data string, pod *corev1.Pod) {
+func AssertLargeObjectValue(namespace, clusterName string, oid int, data string) {
 By("verifying large object", func() {
 query := fmt.Sprintf("SELECT encode(lo_get(%v), 'escape');", oid)
 Eventually(func() (string, error) {
 // We keep getting the pod, since there could be a new pod with the same name
- appUser, appUserPass, err := testsUtils.GetCredentials(
- clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
- Expect(err).ToNot(HaveOccurred())
- host, err := testsUtils.GetHostName(namespace, clusterName, env)
- Expect(err).ToNot(HaveOccurred())
- stdout, _, err := testsUtils.RunQueryFromPod(
- pod,
- host,
- testsUtils.AppDBName,
- appUser,
- appUserPass,
- query,
- env)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
+ Namespace: primaryPod.Namespace,
+ PodName: primaryPod.Name,
+ },
+ postgres.AppDBName,
+ query)
 if err != nil {
 return "", err
 }
 return strings.Trim(stdout, "\n"), nil
- }, testTimeouts[testsUtils.LargeObject]).Should(BeEquivalentTo(data))
+ }, testTimeouts[timeouts.LargeObject]).Should(BeEquivalentTo(data))
 })
}

// AssertClusterStandbysAreStreaming verifies that all the standbys of a cluster have a wal-receiver running.
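// A hypothetical example of how the query builders and the
// QueryMatchExpectationPredicate helper above are meant to be combined;
// primaryPod is assumed to have been fetched earlier, e.g. via
// clusterutils.GetPrimary:
//
//	Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName,
//		databaseExistsQuery("app"), "t"), 30).Should(Succeed())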
func AssertClusterStandbysAreStreaming(namespace string, clusterName string, timeout int32) { + query := "SELECT count(*) FROM pg_catalog.pg_stat_wal_receiver" Eventually(func() error { - standbyPods, err := env.GetClusterReplicas(namespace, clusterName) + standbyPods, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName) if err != nil { return err } for _, pod := range standbyPods.Items { - timeout := time.Second * 10 - out, _, err := env.EventuallyExecCommand(env.Ctx, pod, specs.PostgresContainerName, &timeout, - "psql", "-U", "postgres", "-tAc", "SELECT count(*) FROM pg_stat_wal_receiver") + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + query) if err != nil { return err } @@ -727,22 +659,26 @@ func AssertStandbysFollowPromotion(namespace string, clusterName string, timeout // and are following the promotion, we should find those // records on each of them. - commandTimeout := time.Second * 10 for i := 1; i < 4; i++ { podName := fmt.Sprintf("%v-%v", clusterName, i) podNamespacedName := types.NamespacedName{ Namespace: namespace, Name: podName, } + query := "SELECT count(*) > 0 FROM tps.tl WHERE timeline = '00000002'" Eventually(func() (string, error) { pod := &corev1.Pod{} if err := env.Client.Get(env.Ctx, podNamespacedName, pod); err != nil { return "", err } - out, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", - "SELECT count(*) > 0 FROM tps.tl "+ - "WHERE timeline = '00000002'") + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.AppDBName, + query) return strings.TrimSpace(out), err }, timeout).Should(BeEquivalentTo("t"), "Pod %v should have moved to timeline 2", podName) @@ -750,7 +686,7 @@ func AssertStandbysFollowPromotion(namespace string, clusterName string, timeout }) By("having all the instances ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By(fmt.Sprintf("restoring full cluster functionality within %v seconds", timeout), func() { @@ -787,12 +723,19 @@ func AssertWritesResumedBeforeTimeout(namespace string, clusterName string, time Name: podName, } var switchTime float64 - commandTimeout := time.Second * 10 pod := &corev1.Pod{} err := env.Client.Get(env.Ctx, namespacedName, pod) Expect(err).ToNot(HaveOccurred()) - out, _, err := env.EventuallyExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", query) + out, _, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, postgres.AppDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) switchTime, err = strconv.ParseFloat(strings.TrimSpace(out), 64) if err != nil { @@ -812,12 +755,15 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { timeout := 120 // Wait for the operator to set a new TargetPrimary var cluster *apiv1.Cluster - Eventually(func() (string, error) { + Eventually(func(g Gomega) { var err error - cluster, err = 
env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.TargetPrimary, err - }, timeout).ShouldNot(Or(BeEquivalentTo(oldPrimary), BeEquivalentTo(apiv1.PendingFailoverMarker))) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.TargetPrimary).ToNot(Or( + BeEquivalentTo(oldPrimary), + BeEquivalentTo(apiv1.PendingFailoverMarker), + )) + }, timeout).Should(Succeed()) newPrimary := cluster.Status.TargetPrimary // Expect the chosen pod to eventually become a primary @@ -833,7 +779,6 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { newPrimaryPod = newPrimary }) By(fmt.Sprintf("verifying write operation on the new primary pod: %s", newPrimaryPod), func() { - commandTimeout := time.Second * 10 namespacedName := types.NamespacedName{ Namespace: namespace, Name: newPrimaryPod, @@ -843,36 +788,25 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { Expect(err).ToNot(HaveOccurred()) // Expect write operation to succeed query := "CREATE TABLE IF NOT EXISTS assert_new_primary(var1 text);" - _, _, err = env.EventuallyExecCommand(env.Ctx, pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", query) + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, postgres.AppDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) }) } -func AssertStorageCredentialsAreCreated(namespace string, name string, id string, key string) { - Eventually(func() error { - _, _, err := testsUtils.Run(fmt.Sprintf("kubectl create secret generic %v -n %v "+ - "--from-literal='ID=%v' "+ - "--from-literal='KEY=%v'", - name, namespace, id, key)) - return err - }, 60, 5).Should(BeNil()) -} - -// minioPath gets the MinIO file string for WAL/backup objects in a configured bucket -func minioPath(serverName, fileName string) string { - // the * regexes enable matching these typical paths: - // minio/backups/serverName/base/20220618T140300/data.tar - // minio/backups/serverName/wals/0000000100000000/000000010000000000000002.gz - // minio/backups/serverName/wals/00000002.history.gz - return filepath.Join("*", serverName, "*", fileName) -} - // CheckPointAndSwitchWalOnPrimary trigger a checkpoint and switch wal on primary pod and returns the latest WAL file func CheckPointAndSwitchWalOnPrimary(namespace, clusterName string) string { var latestWAL string By("trigger checkpoint and switch wal on primary", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL = switchWalAndGetLatestArchive(namespace, primary) @@ -885,24 +819,24 @@ func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) { var latestWALPath string // Create a WAL on the primary and check if it arrives at minio, within a short time By("archiving WALs and verifying they exist", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL := switchWalAndGetLatestArchive(namespace, primary) - latestWALPath = minioPath(serverName, latestWAL+".gz") + 
latestWALPath = minio.GetFilePath(serverName, latestWAL+".gz") }) By(fmt.Sprintf("verify the existence of WAL %v in minio", latestWALPath), func() { Eventually(func() (int, error) { // WALs are compressed with gzip in the fixture - return testsUtils.CountFilesOnMinio(minioEnv, latestWALPath) - }, testTimeouts[testsUtils.WalsInMinio]).Should(BeEquivalentTo(1)) + return minio.CountFiles(minioEnv, latestWALPath) + }, testTimeouts[timeouts.WalsInMinio]).Should(BeEquivalentTo(1)) }) } func AssertScheduledBackupsAreScheduled(namespace string, backupYAMLPath string, timeout int) { CreateResourceFromFile(namespace, backupYAMLPath) - scheduledBackupName, err := env.GetResourceNameFromYAML(backupYAMLPath) + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupYAMLPath) Expect(err).NotTo(HaveOccurred()) // We expect the scheduled backup to be scheduled before a @@ -912,7 +846,7 @@ func AssertScheduledBackupsAreScheduled(namespace string, backupYAMLPath string, Name: scheduledBackupName, } - Eventually(func() (*v1.Time, error) { + Eventually(func() (*metav1.Time, error) { scheduledBackup := &apiv1.ScheduledBackup{} err := env.Client.Get(env.Ctx, scheduledBackupNamespacedName, scheduledBackup) @@ -971,23 +905,31 @@ func getScheduledBackupCompleteBackupsCount(namespace string, scheduledBackupNam // AssertPgRecoveryMode verifies if the target pod recovery mode is enabled or disabled func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) { By(fmt.Sprintf("verifying that postgres recovery mode is %v", expectedValue), func() { - stringExpectedValue := "f" - if expectedValue { - stringExpectedValue = "t" - } - Eventually(func() (string, error) { - commandTimeout := time.Second * 10 - stdOut, stdErr, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "postgres", "-tAc", "select pg_is_in_recovery();") + stdOut, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "select pg_catalog.pg_is_in_recovery()") if err != nil { - GinkgoWriter.Printf("stdout: %v\ntderr: %v\n", stdOut, stdErr) + GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", stdOut, stdErr) } return strings.Trim(stdOut, "\n"), err - }, 300, 10).Should(BeEquivalentTo(stringExpectedValue)) + }, 300, 10).Should(BeEquivalentTo(boolPGOutput(expectedValue))) }) } +func boolPGOutput(expectedValue bool) string { + stringExpectedValue := "f" + if expectedValue { + stringExpectedValue = "t" + } + return stringExpectedValue +} + // AssertReplicaModeCluster checks that, after inserting some data in a source cluster, // a replica cluster can be bootstrapped using pg_basebackup and is properly replicating // from the source cluster @@ -997,50 +939,74 @@ func AssertReplicaModeCluster( srcClusterDBName, replicaClusterSample, testTableName string, - pod *corev1.Pod, ) { var primaryReplicaCluster *corev1.Pod - commandTimeout := time.Second * 10 checkQuery := fmt.Sprintf("SELECT count(*) FROM %v", testTableName) - AssertDatabaseIsReady(namespace, srcClusterName, srcClusterDBName) - - AssertCreateTestDataWithDatabaseName( - namespace, - srcClusterName, - srcClusterDBName, - testTableName, - pod, - ) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: srcClusterDBName, + TableName: testTableName, + } + AssertCreateTestData(env, tableLocator) By("creating replica cluster", func() { - 
replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample) + replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSample) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, replicaClusterName, replicaClusterSample, env) // Get primary from replica cluster Eventually(func() error { - primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName) + primaryReplicaCluster, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + replicaClusterName) return err - }, 30, 3).Should(BeNil()) + }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(primaryReplicaCluster, true) }) By("checking data have been copied correctly in replica cluster", func() { Eventually(func() (string, error) { - stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", srcClusterDBName, "-tAc", checkQuery) + stdOut, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, + exec.DatabaseName(srcClusterDBName), + checkQuery) return strings.Trim(stdOut, "\n"), err }, 180, 10).Should(BeEquivalentTo("2")) }) By("writing some new data to the source cluster", func() { - insertRecordIntoTableWithDatabaseName(namespace, srcClusterName, srcClusterDBName, testTableName, 3, pod) + forwardSource, connSource, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + srcClusterName, + srcClusterDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = connSource.Close() + forwardSource.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable(testTableName, 3, connSource) }) By("checking new data have been copied correctly in replica cluster", func() { Eventually(func() (string, error) { - stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", srcClusterDBName, "-tAc", checkQuery) + stdOut, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, + exec.DatabaseName(srcClusterDBName), + checkQuery) return strings.Trim(stdOut, "\n"), err }, 180, 15).Should(BeEquivalentTo("3")) }) @@ -1049,8 +1015,10 @@ func AssertReplicaModeCluster( // verify the replica database created followed the source database, rather than // default to the "app" db and user By("checking that in replica cluster there is no database app and user app", func() { - AssertDatabaseExists(namespace, primaryReplicaCluster.Name, "app", false) - AssertUserExists(namespace, primaryReplicaCluster.Name, "app", false) + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName, + databaseExistsQuery("app"), "f"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName, + roleExistsQuery("app"), "f"), 30).Should(Succeed()) }) } } @@ -1058,7 +1026,7 @@ func AssertReplicaModeCluster( // AssertDetachReplicaModeCluster verifies that a replica cluster can be detached from the // source cluster, and its target primary can be promoted. As such, new write operation // on the source cluster shouldn't be received anymore by the detached replica cluster. 
-// Also, make sure the boostrap fields database and owner of the replica cluster are +// Also, make sure the bootstrap fields database and owner of the replica cluster are // properly ignored func AssertDetachReplicaModeCluster( namespace, @@ -1070,12 +1038,13 @@ func AssertDetachReplicaModeCluster( testTableName string, ) { var primaryReplicaCluster *corev1.Pod - replicaCommandTimeout := time.Second * 10 var referenceTime time.Time By("taking the reference time before the detaching", func() { Eventually(func(g Gomega) { - referenceCondition, err := testsUtils.GetConditionsInClusterStatus(namespace, replicaClusterName, env, + referenceCondition, err := backups.GetConditionsInClusterStatus( + env.Ctx, env.Client, + namespace, replicaClusterName, apiv1.ConditionClusterReady) g.Expect(err).ToNot(HaveOccurred()) g.Expect(referenceCondition.Status).To(BeEquivalentTo(corev1.ConditionTrue)) @@ -1086,7 +1055,7 @@ func AssertDetachReplicaModeCluster( By("disabling the replica mode", func() { Eventually(func(g Gomega) { - _, _, err := testsUtils.RunUnchecked(fmt.Sprintf( + _, _, err := run.Unchecked(fmt.Sprintf( "kubectl patch cluster %v -n %v -p '{\"spec\":{\"replica\":{\"enabled\":false}}}'"+ " --type='merge'", replicaClusterName, namespace)) @@ -1096,16 +1065,18 @@ func AssertDetachReplicaModeCluster( By("ensuring the replica cluster got promoted and restarted", func() { Eventually(func(g Gomega) { - cluster, err := env.GetCluster(namespace, replicaClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, replicaClusterName) g.Expect(err).ToNot(HaveOccurred()) - condition, err := testsUtils.GetConditionsInClusterStatus(namespace, cluster.Name, env, + condition, err := backups.GetConditionsInClusterStatus( + env.Ctx, env.Client, + namespace, cluster.Name, apiv1.ConditionClusterReady) g.Expect(err).ToNot(HaveOccurred()) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(BeEquivalentTo(corev1.ConditionTrue)) g.Expect(condition.LastTransitionTime.Time).To(BeTemporally(">", referenceTime)) }).WithTimeout(60 * time.Second).Should(Succeed()) - AssertClusterIsReady(namespace, replicaClusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, replicaClusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("verifying write operation on the replica cluster primary pod", func() { @@ -1115,10 +1086,17 @@ func AssertDetachReplicaModeCluster( var err error // Get primary from replica cluster - primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName) + primaryReplicaCluster, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + replicaClusterName) g.Expect(err).ToNot(HaveOccurred()) - _, _, err = env.EventuallyExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &replicaCommandTimeout, "psql", "-U", "postgres", srcDatabaseName, "-tAc", query) + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, exec.DatabaseName(srcDatabaseName), + query, + ) g.Expect(err).ToNot(HaveOccurred()) }, 300, 15).Should(Succeed()) }) @@ -1126,17 +1104,33 @@ func AssertDetachReplicaModeCluster( By("verifying the replica database doesn't exist in the replica cluster", func() { // Application database configuration is skipped for replica clusters, // so we expect these to not be present - AssertDatabaseExists(namespace, primaryReplicaCluster.Name, 
replicaDatabaseName, false) - AssertUserExists(namespace, primaryReplicaCluster.Name, replicaUserName, false) + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName, + databaseExistsQuery(replicaDatabaseName), "f"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName, + roleExistsQuery(replicaUserName), "f"), 30).Should(Succeed()) }) By("writing some new data to the source cluster", func() { - AssertCreateTestDataWithDatabaseName(namespace, srcClusterName, srcDatabaseName, testTableName, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: srcDatabaseName, + TableName: testTableName, + } + AssertCreateTestData(env, tableLocator) }) By("verifying that replica cluster was not modified", func() { - outTables, stdErr, err := env.EventuallyExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &replicaCommandTimeout, "psql", "-U", "postgres", srcDatabaseName, "-tAc", "\\dt") + outTables, stdErr, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, exec.DatabaseName(srcDatabaseName), + "\\dt", + RetryTimeout, + PollingTime, + ) if err != nil { GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", outTables, stdErr) } @@ -1146,56 +1140,62 @@ func AssertDetachReplicaModeCluster( } func AssertWritesToReplicaFails( - connectingPod *corev1.Pod, - service string, - appDBName string, - appDBUser string, - appDBPass string, + namespace, service, appDBName, appDBUser, appDBPass string, ) { - By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), - func() { - timeout := time.Second * 10 - dsn := testsUtils.CreateDSN(service, appDBUser, appDBName, appDBPass, testsUtils.Require, 5432) + By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), func() { + Eventually(func(g Gomega) { + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, service, + appDBName, appDBUser, appDBPass) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + g.Expect(err).ToNot(HaveOccurred()) + var rawValue string // Expect to be connected to a replica - stdout, _, err := env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "select pg_is_in_recovery()") - value := strings.Trim(stdout, "\n") - Expect(value, err).To(Equal("t")) + row := conn.QueryRow("SELECT pg_catalog.pg_is_in_recovery()") + err = row.Scan(&rawValue) + g.Expect(err).ToNot(HaveOccurred()) + isReplica := strings.TrimSpace(rawValue) + g.Expect(isReplica).To(BeEquivalentTo("true")) // Expect to be in a read-only transaction - _, _, err = utils.ExecCommand(env.Ctx, env.Interface, env.RestClientConfig, *connectingPod, - specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "CREATE TABLE IF NOT EXISTS table1(var1 text);") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).Should( - ContainSubstring("cannot execute CREATE TABLE in a read-only transaction")) - }) + _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)") + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).Should(ContainSubstring("cannot execute CREATE TABLE in a read-only transaction")) + }, RetryTimeout).Should(Succeed()) + }) } -func AssertWritesToPrimarySucceeds( - 
connectingPod *corev1.Pod, - service string, - appDBName string, - appDBUser string, - appDBPass string, -) { - By(fmt.Sprintf("Verifying %v service correctly manages writes", service), - func() { - timeout := time.Second * 10 - dsn := testsUtils.CreateDSN(service, appDBUser, appDBName, appDBPass, testsUtils.Require, 5432) +func AssertWritesToPrimarySucceeds(namespace, service, appDBName, appDBUser, appDBPass string) { + By(fmt.Sprintf("Verifying %v service correctly manages writes", service), func() { + Eventually(func(g Gomega) { + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, service, + appDBName, appDBUser, appDBPass) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + g.Expect(err).ToNot(HaveOccurred()) + var rawValue string // Expect to be connected to a primary - stdout, _, err := env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "select pg_is_in_recovery()") - value := strings.Trim(stdout, "\n") - Expect(value, err).To(Equal("f")) + row := conn.QueryRow("SELECT pg_catalog.pg_is_in_recovery()") + err = row.Scan(&rawValue) + g.Expect(err).ToNot(HaveOccurred()) + isReplica := strings.TrimSpace(rawValue) + g.Expect(isReplica).To(BeEquivalentTo("false")) // Expect to be able to write - _, _, err = env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "CREATE TABLE IF NOT EXISTS table1(var1 text);") - Expect(err).ToNot(HaveOccurred()) - }) + _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)") + g.Expect(err).ToNot(HaveOccurred()) + }, RetryTimeout).Should(Succeed()) + }) } func AssertFastFailOver( @@ -1228,30 +1228,21 @@ func AssertFastFailOver( }) By("having a Cluster with three instances ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) // Node 1 should be the primary, so the -rw service should // point there. We verify this. 
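// The port-forwarded service connection pattern used by
// AssertWritesToReplicaFails and AssertWritesToPrimarySucceeds above, sketched
// in isolation (all literal values here are placeholders):
//
//	forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
//		env.Ctx, env.Interface, env.RestClientConfig,
//		"my-namespace", "cluster-example-rw", "app", "app", "password")
//	defer func() {
//		_ = conn.Close()    // close the SQL connection first
//		forwardConn.Close() // then tear down the port-forward
//	}()
//	// conn is then used as a *sql.DB against the forwarded service.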
By("having the current primary on node1", func() { - endpointName := clusterName + "-rw" - endpoint := &corev1.Endpoints{} - endpointNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: endpointName, - } - podName := clusterName + "-1" + rwServiceName := clusterName + "-rw" + endpointSlice, err := testsUtils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, rwServiceName) + Expect(err).ToNot(HaveOccurred()) + pod := &corev1.Pod{} - podNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: podName, - } - err = env.Client.Get(env.Ctx, endpointNamespacedName, - endpoint) + podName := clusterName + "-1" + err = env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: podName}, pod) Expect(err).ToNot(HaveOccurred()) - err = env.Client.Get(env.Ctx, podNamespacedName, pod) - Expect(testsUtils.FirstEndpointIP(endpoint), err).To( - BeEquivalentTo(pod.Status.PodIP)) + Expect(testsUtils.FirstEndpointSliceIP(endpointSlice)).To(BeEquivalentTo(pod.Status.PodIP)) }) By("preparing the db for the test scenario", func() { @@ -1266,16 +1257,10 @@ func AssertFastFailOver( ", PRIMARY KEY (id)" + ")" - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - _, _, err = env.ExecCommandWithPsqlClient( - namespace, - clusterName, - primaryPod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) + _, err = postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) @@ -1285,33 +1270,39 @@ func AssertFastFailOver( // on the postgres primary. We make sure that the first // records appear on the database before moving to the next // step. 
- _, _, err = testsUtils.Run("kubectl create -n " + namespace + + _, _, err = run.Run("kubectl create -n " + namespace + " -f " + webTestFile) Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.Run("kubectl create -n " + namespace + + webtestDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "webtest", Namespace: namespace}} + Expect(deployments.WaitForReady(env.Ctx, env.Client, webtestDeploy, 60)).To(Succeed()) + + _, _, err = run.Run("kubectl create -n " + namespace + " -f " + webTestJob) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 - timeout := 60 primaryPodName := clusterName + "-1" primaryPodNamespacedName := types.NamespacedName{ Namespace: namespace, Name: primaryPodName, } + query := "SELECT count(*) > 0 FROM tps.tl" Eventually(func() (string, error) { primaryPod := &corev1.Pod{} - err = env.Client.Get(env.Ctx, primaryPodNamespacedName, primaryPod) - if err != nil { + if err = env.Client.Get(env.Ctx, primaryPodNamespacedName, primaryPod); err != nil { return "", err } - out, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", - "SELECT count(*) > 0 FROM tps.tl") + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.AppDBName, + query) return strings.TrimSpace(out), err - }, timeout).Should(BeEquivalentTo("t")) + }, RetryTimeout).Should(BeEquivalentTo("t")) }) By("deleting the primary", func() { @@ -1320,7 +1311,7 @@ func AssertFastFailOver( GracePeriodSeconds: &quickDeletionPeriod, } lm := clusterName + "-1" - err = env.DeletePod(namespace, lm, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, lm, quickDelete) Expect(err).ToNot(HaveOccurred()) }) @@ -1332,7 +1323,7 @@ func AssertFastFailOver( func AssertCustomMetricsResourcesExist(namespace, sampleFile string, configMapsCount, secretsCount int) { By("verifying the custom metrics ConfigMaps and Secrets exist", func() { // Create the ConfigMaps and a Secret - _, _, err := testsUtils.Run("kubectl apply -n " + namespace + " -f " + sampleFile) + _, _, err := run.Run("kubectl apply -n " + namespace + " -f " + sampleFile) Expect(err).ToNot(HaveOccurred()) // Check configmaps exist @@ -1358,53 +1349,61 @@ func AssertCustomMetricsResourcesExist(namespace, sampleFile string, configMapsC }) } -func AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBName, tableName string, pod *corev1.Pod) { +func AssertCreationOfTestDataForTargetDB( + env *environment.TestingEnvironment, + namespace, + clusterName, + targetDBName, + tableName string, +) { By(fmt.Sprintf("creating target database '%v' and table '%v'", targetDBName, tableName), func() { - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + // We need to gather the cluster primary to create the database via superuser + currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - // We need to gather the cluster primary to create the database via superuser - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) + appUser, _, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.ApplicationUserSecretSuffix, 
+ ) Expect(err).ToNot(HaveOccurred()) // Create database - commandTimeout := time.Second * 10 createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER %v", targetDBName, appUser) - _, _, err = env.ExecCommand( + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: currentPrimary.Namespace, + PodName: currentPrimary.Name, + }, + postgres.PostgresDBName, + createDBQuery) + Expect(err).ToNot(HaveOccurred()) + + // Open a connection to the newly created database + forward, conn, err := postgres.ForwardPSQLConnection( env.Ctx, - *currentPrimary, - specs.PostgresContainerName, - &commandTimeout, - "psql", "-U", "postgres", "-tAc", createDBQuery, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterName, + targetDBName, + apiv1.ApplicationUserSecretSuffix, ) + defer func() { + _ = conn.Close() + forward.Close() + }() Expect(err).ToNot(HaveOccurred()) // Create table on target database createTableQuery := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (id int);", tableName) - _, _, err = testsUtils.RunQueryFromPod( - pod, - host, - targetDBName, - appUser, - appUserPass, - createTableQuery, - env, - ) + _, err = conn.Exec(createTableQuery) Expect(err).ToNot(HaveOccurred()) // Grant a permission grantRoleQuery := "GRANT SELECT ON all tables in schema public to pg_monitor;" - _, _, err = testsUtils.RunQueryFromPod( - pod, - host, - targetDBName, - appUser, - appUserPass, - grantRoleQuery, - env, - ) + _, err = conn.Exec(grantRoleQuery) Expect(err).ToNot(HaveOccurred()) }) } @@ -1417,7 +1416,6 @@ func AssertApplicationDatabaseConnection( appDB, appPassword, appSecretName string, - pod *corev1.Pod, ) { By("checking cluster can connect with application database user and password", func() { // Get the app user password from the auto generated -app secret if appPassword is not provided @@ -1434,24 +1432,25 @@ func AssertApplicationDatabaseConnection( Expect(err).ToNot(HaveOccurred()) appPassword = string(appSecret.Data["password"]) } - // rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) - rwService := testsUtils.CreateServiceFQDN(namespace, testsUtils.GetReadWriteServiceName(clusterName)) + rwService := services.GetReadWriteServiceName(clusterName) - AssertConnection(rwService, appUser, appDB, appPassword, *pod, 60, env) + AssertConnection(namespace, rwService, appDB, appUser, appPassword, env) }) } func AssertMetricsData(namespace, targetOne, targetTwo, targetSecret string, cluster *apiv1.Cluster) { By("collect and verify metric being exposed with target databases", func() { - podList, err := env.GetClusterPodList(namespace, cluster.Name) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, cluster.Name) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podName := pod.GetName() - out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetOne))).Should(BeTrue(), + Expect(strings.Contains(out, + fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetOne))).Should(BeTrue(), "Metric collection issues on %v.\nCollected metrics:\n%v", podName, out) - Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetTwo))).Should(BeTrue(), + Expect(strings.Contains(out, + 
fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetTwo))).Should(BeTrue(), "Metric collection issues on %v.\nCollected metrics:\n%v", podName, out) Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_test_rows{datname="%v"} 1`, targetSecret))).Should(BeTrue(), @@ -1470,7 +1469,10 @@ func AssertMetricsData(namespace, targetOne, targetTwo, targetSecret string, clu func CreateAndAssertServerCertificatesSecrets( namespace, clusterName, caSecName, tlsSecName string, includeCAPrivateKey bool, ) { - cluster, caPair, err := testsUtils.CreateSecretCA(namespace, clusterName, caSecName, includeCAPrivateKey, env) + cluster, caPair, err := secrets.CreateSecretCA( + env.Ctx, env.Client, + namespace, clusterName, caSecName, includeCAPrivateKey, + ) Expect(err).ToNot(HaveOccurred()) serverPair, err := caPair.CreateAndSignPair(cluster.GetServiceReadWriteName(), certs.CertTypeServer, @@ -1485,7 +1487,9 @@ func CreateAndAssertServerCertificatesSecrets( func CreateAndAssertClientCertificatesSecrets( namespace, clusterName, caSecName, tlsSecName, userSecName string, includeCAPrivateKey bool, ) { - _, caPair, err := testsUtils.CreateSecretCA(namespace, clusterName, caSecName, includeCAPrivateKey, env) + _, caPair, err := secrets.CreateSecretCA( + env.Ctx, env.Client, + namespace, clusterName, caSecName, includeCAPrivateKey) Expect(err).ToNot(HaveOccurred()) // Sign tls certificates for streaming_replica user @@ -1515,140 +1519,151 @@ func AssertSSLVerifyFullDBConnectionFromAppPod(namespace string, clusterName str "sslrootcert=/etc/secrets/ca/ca.crt "+ "dbname=app user=app sslmode=verify-full", clusterName, namespace) timeout := time.Second * 10 - stdout, stderr, err := env.ExecCommand(env.Ctx, appPod, appPod.Spec.Containers[0].Name, &timeout, + stdout, stderr, err := exec.Command( + env.Ctx, env.Interface, env.RestClientConfig, + appPod, appPod.Spec.Containers[0].Name, &timeout, "psql", dsn, "-tAc", "SELECT 1") return stdout, stderr, err }, 360).Should(BeEquivalentTo("1\n")) }) } -func AssertCreateSASTokenCredentials(namespace string, id string, key string) { - // Adding 24 hours to the current time - date := time.Now().UTC().Add(time.Hour * 24) - // Creating date time format for az command - expiringDate := fmt.Sprintf("%v"+"-"+"%d"+"-"+"%v"+"T"+"%v"+":"+"%v"+"Z", - date.Year(), - date.Month(), - date.Day(), - date.Hour(), - date.Minute()) - - out, _, err := testsUtils.Run(fmt.Sprintf( - // SAS Token at Blob Container level does not currently work in Barman Cloud - // https://github.com/EnterpriseDB/barman/issues/388 - // we will use SAS Token at Storage Account level - // ( "az storage container generate-sas --account-name %v "+ - // "--name %v "+ - // "--https-only --permissions racwdl --auth-mode key --only-show-errors "+ - // "--expiry \"$(date -u -d \"+4 hours\" '+%%Y-%%m-%%dT%%H:%%MZ')\"", - // id, blobContainerName ) - "az storage account generate-sas --account-name %v "+ - "--https-only --permissions cdlruwap --account-key %v "+ - "--resource-types co --services b --expiry %v -o tsv", - id, key, expiringDate)) - Expect(err).ToNot(HaveOccurred()) - SASTokenRW := strings.TrimRight(out, "\n") - - out, _, err = testsUtils.Run(fmt.Sprintf( - "az storage account generate-sas --account-name %v "+ - "--https-only --permissions lr --account-key %v "+ - "--resource-types co --services b --expiry %v -o tsv", - id, key, expiringDate)) - Expect(err).ToNot(HaveOccurred()) - SASTokenRO := strings.TrimRight(out, "\n") - - AssertROSASTokenUnableToWrite("restore-cluster-sas", id, SASTokenRO) - - 
AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds-sas", id, SASTokenRW) - AssertStorageCredentialsAreCreated(namespace, "restore-storage-creds-sas", id, SASTokenRO) -} - -func AssertROSASTokenUnableToWrite(containerName string, id string, key string) { - _, _, err := testsUtils.RunUnchecked(fmt.Sprintf("az storage container create "+ - "--name %v --account-name %v "+ - "--sas-token %v", containerName, id, key)) - Expect(err).To(HaveOccurred()) -} - -func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, tableName string, pod *corev1.Pod) { +func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, tableName string) { By("Async Replication into external cluster", func() { - restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile) Expect(err).ToNot(HaveOccurred()) // Add additional data to the source cluster - sourceClusterName, err := env.GetResourceNameFromYAML(sourceClusterFile) + sourceClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterFile) Expect(err).ToNot(HaveOccurred()) CreateResourceFromFile(namespace, restoreClusterFile) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Test data should be present on restored primary - // NOTE: We use the credentials from the `source-cluster` for the psql connection - // given that this is a replica cluster - restoredPrimary, err := env.GetClusterPrimary(namespace, restoredClusterName) - Expect(err).ToNot(HaveOccurred()) - appUser, appUserPass, err := testsUtils.GetCredentials( - sourceClusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - rwService := testsUtils.CreateServiceFQDN(namespace, testsUtils.GetReadWriteServiceName(restoredClusterName)) - query := "SELECT count(*) FROM " + tableName - out, _, err := testsUtils.RunQueryFromPod( - restoredPrimary, - rwService, - testsUtils.AppDBName, + restoredPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, restoredClusterName) + Expect(err).ToNot(HaveOccurred()) + + // We need the credentials from the source cluster because the replica cluster + // doesn't create the credentials on its own namespace + appUser, appUserPass, err := secrets.GetCredentials( + env.Ctx, + env.Client, + sourceClusterName, + namespace, + apiv1.ApplicationUserSecretSuffix, + ) + Expect(err).ToNot(HaveOccurred()) + + forwardRestored, connRestored, err := postgres.ForwardPSQLConnectionWithCreds( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + restoredClusterName, + postgres.AppDBName, appUser, appUserPass, - query, - env, ) - Expect(strings.Trim(out, "\n"), err).To(BeEquivalentTo("2")) + defer func() { + _ = connRestored.Close() + forwardRestored.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + row := connRestored.QueryRow(fmt.Sprintf("SELECT count(*) FROM %s", tableName)) + var countString string + err = row.Scan(&countString) + Expect(err).ToNot(HaveOccurred()) + Expect(countString).To(BeEquivalentTo("2")) + + forwardSource, connSource, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + sourceClusterName, + postgres.AppDBName, + 
apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = connSource.Close() + forwardSource.Close() + }() + Expect(err).ToNot(HaveOccurred()) // Insert new data in the source cluster - insertRecordIntoTable(namespace, sourceClusterName, tableName, 3, pod) + insertRecordIntoTable(tableName, 3, connSource) AssertArchiveWalOnMinio(namespace, sourceClusterName, sourceClusterName) - AssertDataExpectedCount(namespace, sourceClusterName, tableName, 3, pod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: sourceClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 3) - cluster, err := env.GetCluster(namespace, restoredClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) expectedReplicas := cluster.Spec.Instances - 1 // Cascading replicas should be attached to primary replica - connectedReplicas, err := testsUtils.CountReplicas(env, restoredPrimary) + connectedReplicas, err := postgres.CountReplicas( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + restoredPrimary, RetryTimeout, + ) Expect(connectedReplicas, err).To(BeEquivalentTo(expectedReplicas)) }) } -func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableName string, pod *corev1.Pod) { - restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) +func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableName string) { + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile) Expect(err).ToNot(HaveOccurred()) By("Restoring a backup in a new cluster", func() { CreateResourceFromFile(namespace, restoreClusterFile) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Test data should be present on restored primary - AssertDataExpectedCount(namespace, restoredClusterName, tableName, 2, pod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: restoredClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) By("Ensuring the restored cluster is on timeline 2", func() { - out, _, err := env.ExecCommandWithPsqlClient( + row, err := postgres.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, restoredClusterName, - pod, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", + "SELECT substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", ) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Trim(out, "\n"), err).To(Equal("00000002")) + + var timeline string + err = row.Scan(&timeline) + Expect(err).ToNot(HaveOccurred()) + Expect(timeline).To(BeEquivalentTo("00000002")) }) // Restored standby should be attached to restored primary - AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 120) + AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 140) // Gather Credentials - appUser, appUserPass, err := testsUtils.GetCredentials(restoredClusterName, namespace, - apiv1.ApplicationUserSecretSuffix, env) + appUser, appUserPass, err := secrets.GetCredentials( + env.Ctx, env.Client, + restoredClusterName, 
namespace, + apiv1.ApplicationUserSecretSuffix) Expect(err).ToNot(HaveOccurred()) secretName := restoredClusterName + apiv1.ApplicationUserSecretSuffix @@ -1657,52 +1672,60 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN namespace, restoredClusterName, appUser, - testsUtils.AppDBName, + postgres.AppDBName, appUserPass, secretName, - pod) + ) }) By("update user application password for restored cluster and verify connectivity", func() { const newPassword = "eeh2Zahohx" //nolint:gosec AssertUpdateSecret("password", newPassword, secretName, namespace, restoredClusterName, 30, env) + AssertApplicationDatabaseConnection( namespace, restoredClusterName, appUser, - testsUtils.AppDBName, + postgres.AppDBName, newPassword, secretName, - pod) + ) }) } -func AssertClusterRestore(namespace, restoreClusterFile, tableName string, pod *corev1.Pod) { - restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) +func AssertClusterRestore(namespace, restoreClusterFile, tableName string) { + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile) Expect(err).ToNot(HaveOccurred()) By("Restoring a backup in a new cluster", func() { CreateResourceFromFile(namespace, restoreClusterFile) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Test data should be present on restored primary primary := restoredClusterName + "-1" - AssertDataExpectedCount(namespace, restoredClusterName, tableName, 2, pod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: restoredClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) // Restored primary should be on timeline 2 - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary, }, - testsUtils.DatabaseName("app"), + postgres.AppDBName, "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)") Expect(strings.Trim(out, "\n"), err).To(Equal("00000002")) // Restored standby should be attached to restored primary - AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 120) + AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 140) }) } @@ -1712,14 +1735,14 @@ func AssertClusterImport(namespace, clusterWithExternalClusterName, clusterName, var cluster *apiv1.Cluster By("Importing Database in a new cluster", func() { var err error - cluster, err = testsUtils.ImportDatabaseMicroservice(namespace, clusterName, - clusterWithExternalClusterName, "", databaseName, env) + cluster, err = importdb.ImportDatabaseMicroservice(env.Ctx, env.Client, namespace, clusterName, + clusterWithExternalClusterName, "", databaseName) Expect(err).ToNot(HaveOccurred()) // We give more time than the usual 600s, since the recovery is slower AssertClusterIsReady(namespace, clusterWithExternalClusterName, - testTimeouts[testsUtils.ClusterIsReadySlow], env) + testTimeouts[timeouts.ClusterIsReadySlow], env) // Restored standby should be attached to restored primary - AssertClusterStandbysAreStreaming(namespace, clusterWithExternalClusterName, 120) + AssertClusterStandbysAreStreaming(namespace, 
clusterWithExternalClusterName, 140) }) return cluster } @@ -1735,7 +1758,7 @@ func AssertScheduledBackupsImmediate(namespace, backupYAMLPath, scheduledBackupN Namespace: namespace, Name: scheduledBackupName, } - Eventually(func() (*v1.Time, error) { + Eventually(func() (*metav1.Time, error) { scheduledBackup := &apiv1.ScheduledBackup{} err = env.Client.Get(env.Ctx, scheduledBackupNamespacedName, scheduledBackup) @@ -1762,12 +1785,12 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) { Eventually(func() error { cmd := fmt.Sprintf("kubectl patch ScheduledBackup %v -n %v -p '{\"spec\":{\"suspend\":true}}' "+ "--type='merge'", scheduledBackupName, namespace) - _, _, err = testsUtils.RunUnchecked(cmd) + _, _, err = run.Unchecked(cmd) if err != nil { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) scheduledBackupNamespacedName := types.NamespacedName{ Namespace: namespace, Name: scheduledBackupName, @@ -1810,12 +1833,12 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) { Eventually(func() error { cmd := fmt.Sprintf("kubectl patch ScheduledBackup %v -n %v -p '{\"spec\":{\"suspend\":false}}' "+ "--type='merge'", scheduledBackupName, namespace) - _, _, err = testsUtils.RunUnchecked(cmd) + _, _, err = run.Unchecked(cmd) if err != nil { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) scheduledBackupNamespacedName := types.NamespacedName{ Namespace: namespace, Name: scheduledBackupName, @@ -1837,39 +1860,56 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) { }) } -func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, tableName, lsn string, pod *corev1.Pod) { +func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, tableName, lsn string) { // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Gather the recovered cluster primary - primaryInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) secretName := clusterName + apiv1.ApplicationUserSecretSuffix By("Ensuring the restored cluster is on timeline 3", func() { // Restored primary should be on timeline 3 - stdOut, _, err := env.ExecCommandWithPsqlClient( + row, err := postgres.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - pod, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", ) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Trim(stdOut, "\n"), err).To(Equal(lsn)) + + var currentWalLsn string + err = row.Scan(¤tWalLsn) + Expect(err).ToNot(HaveOccurred()) + Expect(currentWalLsn).To(Equal(lsn)) // Restored standby should be attached to restored primary - Expect(testsUtils.CountReplicas(env, primaryInfo)).To(BeEquivalentTo(2)) + Expect(postgres.CountReplicas( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + primaryInfo, RetryTimeout)).To(BeEquivalentTo(2)) }) By(fmt.Sprintf("after restored, 3rd entry should not be exists in table '%v'", tableName), func() { // Only 2 entries should be present - AssertDataExpectedCount(namespace, clusterName, tableName, 
2, pod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) // Gather credentials - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + appUser, appUserPass, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.ApplicationUserSecretSuffix) Expect(err).ToNot(HaveOccurred()) By("checking the restored cluster with auto generated app password connectable", func() { @@ -1877,10 +1917,10 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta namespace, clusterName, appUser, - testsUtils.AppDBName, + postgres.AppDBName, appUserPass, secretName, - pod) + ) }) By("update user application password for restored cluster and verify connectivity", func() { @@ -1890,48 +1930,60 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta namespace, clusterName, appUser, - testsUtils.AppDBName, + postgres.AppDBName, newPassword, secretName, - pod) + ) }) } -func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn string, pod *corev1.Pod) { - primaryInfo := &corev1.Pod{} - var err error - +func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn string) { By("restoring a backup cluster with PITR in a new cluster", func() { // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) - primaryInfo, err = env.GetClusterPrimary(namespace, clusterName) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) + primaryInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Restored primary should be on timeline 3 - stdOut, _, err := env.ExecCommandWithPsqlClient( + row, err := postgres.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - pod, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", ) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Trim(stdOut, "\n"), err).To(Equal(lsn)) + + var currentWalLsn string + err = row.Scan(¤tWalLsn) + Expect(err).ToNot(HaveOccurred()) + Expect(currentWalLsn).To(Equal(lsn)) // Restored standby should be attached to restored primary - Expect(testsUtils.CountReplicas(env, primaryInfo)).To(BeEquivalentTo(2)) + Expect(postgres.CountReplicas( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + primaryInfo, RetryTimeout)).To(BeEquivalentTo(2)) }) By(fmt.Sprintf("after restored, 3rd entry should not be exists in table '%v'", tableName), func() { // Only 2 entries should be present - AssertDataExpectedCount(namespace, clusterName, tableName, 2, pod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) } func AssertArchiveConditionMet(namespace, clusterName, timeout string) { By("Waiting for the condition", func() { - out, _, err := testsUtils.Run(fmt.Sprintf( + out, _, err := run.Run(fmt.Sprintf( "kubectl -n %s wait --for=condition=ContinuousArchiving=true cluster/%s --timeout=%s", namespace, clusterName, timeout)) 
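		// A note on the technique used here: `kubectl wait --for=condition=<Name>=true`
		// blocks until the resource reports that status condition as true, and exits
		// non-zero once --timeout expires; that failure surfaces through the err
		// checked below. An illustrative invocation (the namespace and cluster
		// names are hypothetical, not values used by this suite):
		//
		//   kubectl -n ns wait --for=condition=ContinuousArchiving=true cluster/pg --timeout=5m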
		Expect(err).ToNot(HaveOccurred())
@@ -1940,245 +1992,48 @@ func AssertArchiveConditionMet(namespace, clusterName, timeout string) {
 	})
 }
 
-func AssertArchiveWalOnAzurite(namespace, clusterName string) {
-	// Create a WAL on the primary and check if it arrives at the Azure Blob Storage within a short time
-	By("archiving WALs and verifying they exist", func() {
-		primary := clusterName + "-1"
-		latestWAL := switchWalAndGetLatestArchive(namespace, primary)
-		// verifying on blob storage using az
-		// Define what file we are looking for in Azurite.
-		// Escapes are required since az expects forward slashes to be escaped
-		path := fmt.Sprintf("%v\\/wals\\/0000000100000000\\/%v.gz", clusterName, latestWAL)
-		// verifying on blob storage using az
-		Eventually(func() (int, error) {
-			return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path)
-		}, 60).Should(BeEquivalentTo(1))
-	})
-}
-
-func AssertArchiveWalOnAzureBlob(namespace, clusterName string, configuration testsUtils.AzureConfiguration) {
-	// Create a WAL on the primary and check if it arrives at the Azure Blob Storage, within a short time
-	By("archiving WALs and verifying they exist", func() {
-		primary, err := env.GetClusterPrimary(namespace, clusterName)
-		Expect(err).ToNot(HaveOccurred())
-		latestWAL := switchWalAndGetLatestArchive(primary.Namespace, primary.Name)
-		// Define what file we are looking for in Azure.
-		// Escapes are required since az expects forward slashes to be escaped
-		path := fmt.Sprintf("wals\\/0000000100000000\\/%v.gz", latestWAL)
-		// Verifying on blob storage using az
-		Eventually(func() (int, error) {
-			return testsUtils.CountFilesOnAzureBlobStorage(configuration, clusterName, path)
-		}, 60).Should(BeEquivalentTo(1))
-	})
-}
-
 // switchWalAndGetLatestArchive triggers a new WAL and gets the name of the latest WAL file
 func switchWalAndGetLatestArchive(namespace, podName string) string {
-	_, _, err := env.ExecQueryInInstancePod(
-		testsUtils.PodLocator{
+	_, _, err := exec.QueryInInstancePodWithTimeout(
+		env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+		exec.PodLocator{
 			Namespace: namespace,
 			PodName:   podName,
 		},
-		testsUtils.DatabaseName("postgres"),
-		"CHECKPOINT;")
-	Expect(err).ToNot(HaveOccurred())
+		postgres.PostgresDBName,
+		"CHECKPOINT",
+		300*time.Second,
+	)
+	Expect(err).ToNot(HaveOccurred(),
+		"failed to trigger a new wal while executing 'switchWalAndGetLatestArchive'")
 
-	out, _, err := env.ExecQueryInInstancePod(
-		testsUtils.PodLocator{
+	out, _, err := exec.QueryInInstancePod(
+		env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+		exec.PodLocator{
 			Namespace: namespace,
 			PodName:   podName,
 		},
-		testsUtils.DatabaseName("postgres"),
-		"SELECT pg_walfile_name(pg_switch_wal());")
-	Expect(err).ToNot(HaveOccurred())
+		postgres.PostgresDBName,
+		"SELECT pg_catalog.pg_walfile_name(pg_switch_wal())",
+	)
+	Expect(err).ToNot(
+		HaveOccurred(),
+		"failed to get latest wal file name while executing 'switchWalAndGetLatestArchive'")
 	return strings.TrimSpace(out)
 }
 
-func prepareClusterForPITROnMinio(
-	namespace,
-	clusterName,
-	backupSampleFile string,
-	expectedVal int,
-	currentTimestamp *string,
-	pod *corev1.Pod,
-) {
-	const tableNamePitr = "for_restore"
-
-	By("backing up a cluster and verifying it exists on minio", func() {
-		testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env)
-		latestTar := minioPath(clusterName, "data.tar")
-		Eventually(func() (int, error) {
-			return testsUtils.CountFilesOnMinio(minioEnv, latestTar)
-
}, 60).Should(BeNumerically(">=", expectedVal), - fmt.Sprintf("verify the number of backups %v is greater than or equal to %v", latestTar, - expectedVal)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - AssertCreateTestData(namespace, clusterName, tableNamePitr, pod) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env, pod) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { - insertRecordIntoTable(namespace, clusterName, tableNamePitr, 3, pod) - }) - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - AssertArchiveConditionMet(namespace, clusterName, "5m") - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterForPITROnAzureBlob( - namespace string, - clusterName string, - backupSampleFile string, - azureConfig testsUtils.AzureConfiguration, - expectedVal int, - currentTimestamp *string, - pod *corev1.Pod, -) { - const tableNamePitr = "for_restore" - By("backing up a cluster and verifying it exists on Azure Blob", func() { - testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env) - - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") - }, 30).Should(BeEquivalentTo(expectedVal)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - AssertCreateTestData(namespace, clusterName, tableNamePitr, pod) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env, pod) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { - insertRecordIntoTable(namespace, clusterName, tableNamePitr, 3, pod) - }) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - AssertArchiveConditionMet(namespace, clusterName, "5m") - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile string) { - By("creating the Azurite storage credentials", func() { - err := testsUtils.CreateStorageCredentialsOnAzurite(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - By("setting up Azurite to hold the backups", func() { - // Deploying azurite for blob storage - err := testsUtils.InstallAzurite(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - By("setting up az-cli", func() { - // This is required as we have a service of Azurite running locally. 
- // In order to connect, we need az cli inside the namespace - err := testsUtils.InstallAzCli(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - // Creating cluster - AssertCreateCluster(namespace, clusterName, clusterSampleFile, env) - - AssertArchiveConditionMet(namespace, clusterName, "5m") -} - -func prepareClusterBackupOnAzurite( - namespace, - clusterName, - clusterSampleFile, - backupFile, - tableName string, - pod *corev1.Pod, -) { - // Setting up Azurite and az cli along with Postgresql cluster - prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile) - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, pod) - AssertArchiveWalOnAzurite(namespace, clusterName) - - By("backing up a cluster and verifying it exists on azurite", func() { - // We create a Backup - testsUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testsUtils.BackupIsReady], env) - // Verifying file called data.tar should be available on Azurite blob storage - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterForPITROnAzurite( - namespace, - clusterName, - backupSampleFile string, - currentTimestamp *string, - pod *corev1.Pod, -) { - By("backing up a cluster and verifying it exists on azurite", func() { - // We create a Backup - testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env) - // Verifying file called data.tar should be available on Azurite blob storage - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - AssertCreateTestData(namespace, clusterName, "for_restore", pod) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env, pod) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() { - insertRecordIntoTable(namespace, clusterName, "for_restore", 3, pod) - }) - AssertArchiveWalOnAzurite(namespace, clusterName) -} - func createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerYamlFilePath string, expectedInstanceCount int) { CreateResourceFromFile(namespace, poolerYamlFilePath) - Eventually(func() (int32, error) { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) { + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) + g.Expect(err).ToNot(HaveOccurred()) // Wait for the deployment to be ready deployment := &appsv1.Deployment{} err = env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: poolerName}, deployment) - - return deployment.Status.ReadyReplicas, err - }, 
300).Should(BeEquivalentTo(expectedInstanceCount)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(deployment.Status.ReadyReplicas).To(BeEquivalentTo(expectedInstanceCount)) + }, 300).Should(Succeed()) // check pooler pod is up and running assertPGBouncerPodsAreReady(namespace, poolerYamlFilePath, expectedInstanceCount) @@ -2189,41 +2044,31 @@ func assertPgBouncerPoolerDeploymentStrategy( expectedMaxSurge, expectedMaxUnavailable string, ) { By("verify pooler deployment has expected rolling update configuration", func() { - Eventually(func() bool { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) { + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) + g.Expect(err).ToNot(HaveOccurred()) // Wait for the deployment to be ready deployment := &appsv1.Deployment{} err = env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: poolerName}, deployment) - if err != nil { - return false - } - if expectedMaxSurge == deployment.Spec.Strategy.RollingUpdate.MaxSurge.String() && - expectedMaxUnavailable == deployment.Spec.Strategy.RollingUpdate.MaxUnavailable.String() { - return true - } - return false - }, 300).Should(BeTrue()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(deployment.Spec.Strategy.RollingUpdate.MaxSurge.String()).To(BeEquivalentTo(expectedMaxSurge)) + g.Expect(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable.String()).To(BeEquivalentTo(expectedMaxUnavailable)) + }, 300).Should(Succeed()) }) } // assertPGBouncerPodsAreReady verifies if PGBouncer pooler pods are ready func assertPGBouncerPodsAreReady(namespace, poolerYamlFilePath string, expectedPodCount int) { - Eventually(func() (bool, error) { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) { + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) + g.Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerName}) - if err != nil { - return false, err - } + g.Expect(err).ToNot(HaveOccurred()) podItemsCount := len(podList.Items) - if podItemsCount != expectedPodCount { - return false, fmt.Errorf("expected pgBouncer pods count match passed expected instance count. "+ - "Got: %v, Expected: %v", podItemsCount, expectedPodCount) - } + g.Expect(podItemsCount).To(BeEquivalentTo(expectedPodCount)) activeAndReadyPodCount := 0 for _, item := range podList.Items { @@ -2232,14 +2077,8 @@ func assertPGBouncerPodsAreReady(namespace, poolerYamlFilePath string, expectedP } continue } - - if activeAndReadyPodCount != expectedPodCount { - return false, fmt.Errorf("expected pgBouncer pods to be all active and ready. 
Got: %v, Expected: %v", - activeAndReadyPodCount, expectedPodCount) - } - - return true, nil - }, 90).Should(BeTrue()) + g.Expect(activeAndReadyPodCount).To(BeEquivalentTo(expectedPodCount)) + }, 90).Should(Succeed()) } func assertReadWriteConnectionUsingPgBouncerService( @@ -2248,28 +2087,29 @@ func assertReadWriteConnectionUsingPgBouncerService( poolerYamlFilePath string, isPoolerRW bool, ) { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerService, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) - poolerService := testsUtils.CreateServiceFQDN(namespace, poolerName) - appUser, generatedAppUserPassword, err := testsUtils.GetCredentials( - clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + + appUser, generatedAppUserPassword, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.ApplicationUserSecretSuffix) Expect(err).ToNot(HaveOccurred()) - AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, *psqlClientPod, 180, env) + AssertConnection(namespace, poolerService, postgres.AppDBName, appUser, generatedAppUserPassword, env) // verify that, if pooler type setup read write then it will allow both read and // write operations or if pooler type setup read only then it will allow only read operations if isPoolerRW { - AssertWritesToPrimarySucceeds(psqlClientPod, poolerService, "app", appUser, + AssertWritesToPrimarySucceeds(namespace, poolerService, "app", appUser, generatedAppUserPassword) } else { - AssertWritesToReplicaFails(psqlClientPod, poolerService, "app", appUser, + AssertWritesToReplicaFails(namespace, poolerService, "app", appUser, generatedAppUserPassword) } } func assertPodIsRecreated(namespace, poolerSampleFile string) { var podNameBeforeDelete string - poolerName, err := env.GetResourceNameFromYAML(poolerSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerSampleFile) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("deleting pooler '%s' pod", poolerName), func() { @@ -2283,7 +2123,7 @@ func assertPodIsRecreated(namespace, poolerSampleFile string) { // deleting pgbouncer pod cmd := fmt.Sprintf("kubectl delete pod %s -n %s", podNameBeforeDelete, namespace) - _, _, err = testsUtils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("verifying pooler '%s' pod has been recreated", poolerName), func() { @@ -2297,7 +2137,7 @@ func assertPodIsRecreated(namespace, poolerSampleFile string) { } if len(podList.Items) == 1 { if utils.IsPodActive(podList.Items[0]) && utils.IsPodReady(podList.Items[0]) { - if !(podNameBeforeDelete == podList.Items[0].GetName()) { + if podNameBeforeDelete != podList.Items[0].GetName() { return true, err } } @@ -2310,7 +2150,7 @@ func assertPodIsRecreated(namespace, poolerSampleFile string) { func assertDeploymentIsRecreated(namespace, poolerSampleFile string) { var deploymentUID types.UID - poolerName, err := env.GetResourceNameFromYAML(poolerSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerSampleFile) Expect(err).ToNot(HaveOccurred()) deploymentNamespacedName := types.NamespacedName{ @@ -2322,7 +2162,7 @@ func assertDeploymentIsRecreated(namespace, poolerSampleFile string) { err := env.Client.Get(env.Ctx, deploymentNamespacedName, deployment) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) - err = testsUtils.DeploymentWaitForReady(env, deployment, 60) + err = deployments.WaitForReady(env.Ctx, env.Client, 
deployment, 60) Expect(err).ToNot(HaveOccurred()) deploymentName := deployment.GetName() @@ -2351,7 +2191,7 @@ func assertDeploymentIsRecreated(namespace, poolerSampleFile string) { }, 300).ShouldNot(BeEquivalentTo(deploymentUID)) }) By(fmt.Sprintf("new '%s' deployment has new pods ready", deploymentName), func() { - err := testsUtils.DeploymentWaitForReady(env, deployment, 120) + err := deployments.WaitForReady(env.Ctx, env.Client, deployment, 120) Expect(err).ToNot(HaveOccurred()) }) By("verifying UIDs of pods have changed", func() { @@ -2376,34 +2216,36 @@ func assertPGBouncerEndpointsContainsPodsIP( poolerYamlFilePath string, expectedPodCount int, ) { - var pgBouncerPods []*corev1.Pod - endpoint := &corev1.Endpoints{} - endpointName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerServiceName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) + endpointSlice := &discoveryv1.EndpointSlice{} Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: endpointName}, endpoint) + var err error + endpointSlice, err = testsUtils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, poolerServiceName) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerName}) Expect(err).ToNot(HaveOccurred()) - Expect(endpoint.Subsets).ToNot(BeEmpty()) + Expect(endpointSlice.Endpoints).ToNot(BeEmpty()) - for _, ip := range endpoint.Subsets[0].Addresses { + var pgBouncerPods []*corev1.Pod + for _, endpoint := range endpointSlice.Endpoints { + ip := endpoint.Addresses[0] for podIndex, pod := range podList.Items { - if pod.Status.PodIP == ip.IP { + if pod.Status.PodIP == ip { pgBouncerPods = append(pgBouncerPods, &podList.Items[podIndex]) continue } } } - Expect(pgBouncerPods).Should(HaveLen(expectedPodCount), "Pod length or IP mismatch in ep") + Expect(pgBouncerPods).Should(HaveLen(expectedPodCount), "Pod length or IP mismatch in endpoint") } // assertPGBouncerHasServiceNameInsideHostParameter makes sure that the service name is contained inside the host file @@ -2411,7 +2253,7 @@ func assertPGBouncerHasServiceNameInsideHostParameter(namespace, serviceName str for _, pod := range podList.Items { command := fmt.Sprintf("kubectl exec -n %s %s -- /bin/bash -c 'grep "+ " \"host=%s\" controller/configs/pgbouncer.ini'", namespace, pod.Name, serviceName) - out, _, err := testsUtils.Run(command) + out, _, err := run.Run(command) Expect(err).ToNot(HaveOccurred()) expectedContainedHost := fmt.Sprintf("host=%s", serviceName) Expect(out).To(ContainSubstring(expectedContainedHost)) @@ -2420,7 +2262,10 @@ func assertPGBouncerHasServiceNameInsideHostParameter(namespace, serviceName str // OnlineResizePVC is for verifying if storage can be automatically expanded, or not func OnlineResizePVC(namespace, clusterName string) { - walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env) + walStorageEnabled, err := storage.IsWalStorageEnabled( + env.Ctx, env.Client, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) pvc := &corev1.PersistentVolumeClaimList{} @@ -2446,9 +2291,9 @@ func OnlineResizePVC(namespace, clusterName 
string) { namespace, s) Eventually(func() error { - _, _, err := testsUtils.RunUnchecked(cmd) + _, _, err := run.Unchecked(cmd) return err - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) } }) By("verifying Cluster storage is expanded", func() { @@ -2457,12 +2302,12 @@ func OnlineResizePVC(namespace, clusterName string) { if walStorageEnabled { expectedCount = 6 } - Eventually(func() int { + Eventually(func(g Gomega) { // Variable counter to store the updated total of expanded PVCs. It should be equal to three updateCount := 0 // Gathering PVC list err := env.Client.List(env.Ctx, pvc, ctrlclient.InNamespace(namespace)) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Iterating through PVC list to compare with expanded size for _, pvClaim := range pvc.Items { // Size comparison @@ -2470,13 +2315,16 @@ func OnlineResizePVC(namespace, clusterName string) { updateCount++ } } - return updateCount - }, 300).Should(BeEquivalentTo(expectedCount)) + g.Expect(updateCount).To(BeEquivalentTo(expectedCount)) + }, 300).Should(Succeed()) }) } func OfflineResizePVC(namespace, clusterName string, timeout int) { - walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env) + walStorageEnabled, err := storage.IsWalStorageEnabled( + env.Ctx, env.Client, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) By("verify PVC size before expansion", func() { @@ -2502,64 +2350,65 @@ func OfflineResizePVC(namespace, clusterName string, timeout int) { namespace, s) Eventually(func() error { - _, _, err := testsUtils.RunUnchecked(cmd) + _, _, err := run.Unchecked(cmd) return err - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) } }) By("deleting Pod and PVCs, first replicas then the primary", func() { // Gathering cluster primary - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) currentPrimaryWalStorageName := currentPrimary.Name + "-wal" quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) Expect(len(podList.Items), err).To(BeEquivalentTo(3)) // Iterating through PVC list for deleting pod and PVC for storage expansion - for _, pod := range podList.Items { + for _, p := range podList.Items { // Comparing cluster pods to not be primary to ensure cluster is healthy. 
// Primary will be eventually deleted - if !specs.IsPodPrimary(pod) { + if !specs.IsPodPrimary(p) { // Deleting PVC - _, _, err = testsUtils.Run( - "kubectl delete pvc " + pod.Name + " -n " + namespace + " --wait=false") + _, _, err = run.Run( + "kubectl delete pvc " + p.Name + " -n " + namespace + " --wait=false") Expect(err).ToNot(HaveOccurred()) // Deleting WalStorage PVC if needed if walStorageEnabled { - _, _, err = testsUtils.Run( - "kubectl delete pvc " + pod.Name + "-wal" + " -n " + namespace + " --wait=false") + _, _, err = run.Run( + "kubectl delete pvc " + p.Name + "-wal" + " -n " + namespace + " --wait=false") Expect(err).ToNot(HaveOccurred()) } // Deleting standby and replica pods - err = env.DeletePod(namespace, pod.Name, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, p.Name, quickDelete) Expect(err).ToNot(HaveOccurred()) } } AssertClusterIsReady(namespace, clusterName, timeout, env) // Deleting primary pvc - _, _, err = testsUtils.Run( + _, _, err = run.Run( "kubectl delete pvc " + currentPrimary.Name + " -n " + namespace + " --wait=false") Expect(err).ToNot(HaveOccurred()) // Deleting Primary WalStorage PVC if needed if walStorageEnabled { - _, _, err = testsUtils.Run( + _, _, err = run.Run( "kubectl delete pvc " + currentPrimaryWalStorageName + " -n " + namespace + " --wait=false") Expect(err).ToNot(HaveOccurred()) } // Deleting primary pod - err = env.DeletePod(namespace, currentPrimary.Name, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary.Name, quickDelete) Expect(err).ToNot(HaveOccurred()) }) AssertClusterIsReady(namespace, clusterName, timeout, env) By("verifying Cluster storage is expanded", func() { // Gathering PVC list for comparison - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) // Gathering PVC size and comparing with expanded value expectedCount := 3 @@ -2586,21 +2435,24 @@ func DeleteTableUsingPgBouncerService( namespace, clusterName, poolerYamlFilePath string, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, pod *corev1.Pod, ) { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerService, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) - poolerService := testsUtils.CreateServiceFQDN(namespace, poolerName) - appUser, generatedAppUserPassword, err := testsUtils.GetCredentials( - clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + + appUser, generatedAppUserPassword, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.ApplicationUserSecretSuffix, + ) Expect(err).ToNot(HaveOccurred()) - AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, *pod, 180, env) + AssertConnection(namespace, poolerService, postgres.AppDBName, appUser, generatedAppUserPassword, env) - _, _, err = testsUtils.RunQueryFromPod( - pod, poolerService, "app", appUser, generatedAppUserPassword, - "DROP TABLE table1", - env) + connectionTimeout := time.Second * 10 + dsn := services.CreateDSN(poolerService, appUser, postgres.AppDBName, generatedAppUserPassword, + services.Require, 5432) + _, _, err = env.EventuallyExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &connectionTimeout, + "psql", dsn, "-tAc", "DROP TABLE table1") Expect(err).ToNot(HaveOccurred()) } @@ -2626,11 +2478,11 @@ func collectAndAssertDefaultMetricsPresentOnEachPod( ) } - podList, err := 
env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podName := pod.GetName() - out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, tlsEnabled) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, tlsEnabled) Expect(err).ToNot(HaveOccurred()) // error should be zero on each pod metrics @@ -2669,24 +2521,28 @@ func collectAndAssertCollectorMetricsPresentOnEachPod(cluster *apiv1.Cluster) { "cnpg_collector_replica_mode", } - if env.PostgresVersion > 14 { + if env.PostgresVersion >= 14 { cnpgCollectorMetrics = append(cnpgCollectorMetrics, "cnpg_collector_wal_records", "cnpg_collector_wal_fpi", "cnpg_collector_wal_bytes", "cnpg_collector_wal_buffers_full", - "cnpg_collector_wal_write", - "cnpg_collector_wal_sync", - "cnpg_collector_wal_write_time", - "cnpg_collector_wal_sync_time", ) + if env.PostgresVersion < 18 { + cnpgCollectorMetrics = append(cnpgCollectorMetrics, + "cnpg_collector_wal_write", + "cnpg_collector_wal_sync", + "cnpg_collector_wal_write_time", + "cnpg_collector_wal_sync_time", + ) + } } By("collecting and verify set of collector metrics on each pod", func() { - podList, err := env.GetClusterPodList(cluster.Namespace, cluster.Name) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, cluster.Namespace, cluster.Name) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podName := pod.GetName() - out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred()) // error should be zero on each pod metrics @@ -2706,17 +2562,17 @@ func collectAndAssertCollectorMetricsPresentOnEachPod(cluster *apiv1.Cluster) { // YAML sample file and returns any errors func CreateResourcesFromFileWithError(namespace, sampleFilePath string) error { wrapErr := func(err error) error { return fmt.Errorf("on CreateResourcesFromFileWithError: %w", err) } - yaml, err := GetYAMLContent(sampleFilePath) + yamlContent, err := GetYAMLContent(sampleFilePath) if err != nil { return wrapErr(err) } - objects, err := testsUtils.ParseObjectsFromYAML(yaml, namespace) + objects, err := yaml.ParseObjectsFromYAML(yamlContent, namespace) if err != nil { return wrapErr(err) } for _, obj := range objects { - _, err := testsUtils.CreateObject(env, obj) + _, err := objectsutils.Create(env.Ctx, env.Client, obj) if err != nil { return wrapErr(err) } @@ -2728,7 +2584,7 @@ func CreateResourcesFromFileWithError(namespace, sampleFilePath string) error { func CreateResourceFromFile(namespace, sampleFilePath string) { Eventually(func() error { return CreateResourcesFromFileWithError(namespace, sampleFilePath) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) } // GetYAMLContent opens a .yaml of .template file and returns its content @@ -2742,7 +2598,7 @@ func GetYAMLContent(sampleFilePath string) ([]byte, error) { if err != nil { return nil, wrapErr(err) } - yaml := data + yamlContent := data if filepath.Ext(cleanPath) == ".template" { preRollingUpdateImg := os.Getenv("E2E_PRE_ROLLING_UPDATE_IMG") @@ -2762,12 +2618,12 @@ func GetYAMLContent(sampleFilePath string) ([]byte, error) { envVars["SERVER_NAME"] = serverName } - yaml, err = testsUtils.Envsubst(envVars, data) + yamlContent, err = envsubst.Envsubst(envVars, data) if 
err != nil { return nil, wrapErr(err) } } - return yaml, nil + return yamlContent, nil } func buildTemplateEnvs(additionalEnvs map[string]string) map[string]string { @@ -2791,17 +2647,17 @@ func buildTemplateEnvs(additionalEnvs map[string]string) map[string]string { // DeleteResourcesFromFile deletes the Kubernetes objects described in the file func DeleteResourcesFromFile(namespace, sampleFilePath string) error { wrapErr := func(err error) error { return fmt.Errorf("in DeleteResourcesFromFile: %w", err) } - yaml, err := GetYAMLContent(sampleFilePath) + yamlContent, err := GetYAMLContent(sampleFilePath) if err != nil { return wrapErr(err) } - objects, err := testsUtils.ParseObjectsFromYAML(yaml, namespace) + objects, err := yaml.ParseObjectsFromYAML(yamlContent, namespace) if err != nil { return wrapErr(err) } for _, obj := range objects { - err := testsUtils.DeleteObject(env, obj) + err := objectsutils.Delete(env.Ctx, env.Client, obj) if err != nil { return wrapErr(err) } @@ -2809,43 +2665,17 @@ func DeleteResourcesFromFile(namespace, sampleFilePath string) error { return nil } -// Assert in the giving cluster, all the postgres db has no pending restart -func AssertPostgresNoPendingRestart(namespace, clusterName string, cmdTimeout time.Duration, timeout int) { - By("waiting for all pods have no pending restart", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - // Check that the new parameter has been modified in every pod - Eventually(func() (bool, error) { - noPendingRestart := true - for _, pod := range podList.Items { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &cmdTimeout, - "psql", "-U", "postgres", "-tAc", "SELECT EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)") - if err != nil { - return false, nil - } - if strings.Trim(stdout, "\n") == "f" { - continue - } - - noPendingRestart = false - break - } - return noPendingRestart, nil - }, timeout, 2).Should(BeEquivalentTo(true), - "all pods in cluster has no pending restart") - }) -} - func AssertBackupConditionTimestampChangedInClusterStatus( namespace, clusterName string, clusterConditionType apiv1.ClusterConditionType, - lastTransactionTimeStamp *v1.Time, + lastTransactionTimeStamp *metav1.Time, ) { By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { Eventually(func() (bool, error) { - getBackupCondition, err := testsUtils.GetConditionsInClusterStatus( - namespace, clusterName, env, clusterConditionType) + getBackupCondition, err := backups.GetConditionsInClusterStatus( + env.Ctx, env.Client, + namespace, clusterName, clusterConditionType) if err != nil { return false, err } @@ -2854,30 +2684,18 @@ func AssertBackupConditionTimestampChangedInClusterStatus( }) } -func AssertBackupConditionInClusterStatus(namespace, clusterName string) { - By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { - Eventually(func() (string, error) { - getBackupCondition, err := testsUtils.GetConditionsInClusterStatus( - namespace, clusterName, env, apiv1.ConditionBackup) - if err != nil { - return "", err - } - return string(getBackupCondition.Status), nil - }, 300, 5).Should(BeEquivalentTo("True")) - }) -} - func AssertClusterReadinessStatusIsReached( namespace, clusterName string, conditionStatus apiv1.ConditionStatus, timeout int, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { By(fmt.Sprintf("waiting for cluster condition status in 
cluster '%v'", clusterName), func() { Eventually(func() (string, error) { - clusterCondition, err := testsUtils.GetConditionsInClusterStatus( - namespace, clusterName, env, apiv1.ConditionClusterReady) + clusterCondition, err := backups.GetConditionsInClusterStatus( + env.Ctx, env.Client, + namespace, clusterName, apiv1.ConditionClusterReady) if err != nil { return "", err } @@ -2896,7 +2714,7 @@ func AssertPvcHasLabels( By("checking PVC have the correct role labels", func() { Eventually(func(g Gomega) { // Gather the list of PVCs in the current namespace - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) g.Expect(err).ToNot(HaveOccurred()) // Iterating through PVC list @@ -2924,7 +2742,7 @@ func AssertPvcHasLabels( utils.PvcRoleLabelName: ExpectedPvcRole, utils.ClusterInstanceRoleLabelName: ExpectedRole, } - g.Expect(testsUtils.PvcHasLabels(pvc, expectedLabels)).To(BeTrue(), + g.Expect(storage.PvcHasLabels(pvc, expectedLabels)).To(BeTrue(), fmt.Sprintf("expectedLabels: %v and found actualLabels on pvc: %v", expectedLabels, pod.GetLabels())) } @@ -2945,33 +2763,45 @@ func AssertReplicationSlotsOnPod( ) { GinkgoWriter.Println("checking contain slots:", expectedSlots, "for pod:", pod.Name) Eventually(func() ([]string, error) { - currentSlots, err := testsUtils.GetReplicationSlotsOnPod(namespace, pod.GetName(), env) + currentSlots, err := replicationslot.GetReplicationSlotsOnPod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, pod.GetName(), postgres.AppDBName) return currentSlots, err }, 300).Should(ContainElements(expectedSlots), func() string { - return testsUtils.PrintReplicationSlots(namespace, clusterName, env) + return replicationslot.PrintReplicationSlots( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName) }) GinkgoWriter.Println("executing replication slot assertion query on pod", pod.Name) for _, slot := range expectedSlots { query := fmt.Sprintf( - "SELECT EXISTS (SELECT 1 FROM pg_replication_slots "+ + "SELECT EXISTS (SELECT 1 FROM pg_catalog.pg_replication_slots "+ "WHERE slot_name = '%v' AND active = '%t' "+ "AND temporary = 'f' AND slot_type = 'physical')", slot, isActiveOnReplica) if specs.IsPodPrimary(pod) { query = fmt.Sprintf( - "SELECT EXISTS (SELECT 1 FROM pg_replication_slots "+ + "SELECT EXISTS (SELECT 1 FROM pg_catalog.pg_replication_slots "+ "WHERE slot_name = '%v' AND active = '%t' "+ "AND temporary = 'f' AND slot_type = 'physical')", slot, isActiveOnPrimary) } Eventually(func() (string, error) { - stdout, _, err := testsUtils.RunQueryFromPod(&pod, testsUtils.PGLocalSocketDir, - "app", "postgres", "''", query, env) + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + query) return strings.TrimSpace(stdout), err }, 300).Should(BeEquivalentTo("t"), func() string { - return testsUtils.PrintReplicationSlots(namespace, clusterName, env) + return replicationslot.PrintReplicationSlots( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName) }) } } @@ -2982,19 +2812,23 @@ func AssertClusterReplicationSlotsAligned( namespace, clusterName string, ) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - 
Eventually(func() bool { + Eventually(func(g Gomega) { var lsnList []string for _, pod := range podList.Items { - out, err := testsUtils.GetReplicationSlotLsnsOnPod(namespace, clusterName, pod, env) - Expect(err).ToNot(HaveOccurred()) + out, err := replicationslot.GetReplicationSlotLsnsOnPod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName, pod) + g.Expect(err).ToNot(HaveOccurred(), "error getting replication slot lsn on pod %v", pod.Name) lsnList = append(lsnList, out...) } - return testsUtils.AreSameLsn(lsnList) - }, 300).Should(BeEquivalentTo(true), + g.Expect(replicationslot.AreSameLsn(lsnList)).To(BeTrue()) + }).WithTimeout(300*time.Second).WithPolling(2*time.Second).Should(Succeed(), func() string { - return testsUtils.PrintReplicationSlots(namespace, clusterName, env) + return replicationslot.PrintReplicationSlots( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName) }) } @@ -3002,10 +2836,12 @@ func AssertClusterReplicationSlotsAligned( // of the cluster exist and are aligned. func AssertClusterHAReplicationSlots(namespace, clusterName string) { By("verifying all cluster's replication slots exist and are aligned", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { - expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env) + expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod( + env.Ctx, env.Client, + namespace, clusterName, pod.GetName()) Expect(err).ToNot(HaveOccurred()) AssertReplicationSlotsOnPod(namespace, clusterName, pod, expectedSlots, true, false) } @@ -3016,7 +2852,7 @@ func AssertClusterHAReplicationSlots(namespace, clusterName string) { // AssertClusterRollingRestart restarts a given cluster func AssertClusterRollingRestart(namespace, clusterName string) { By(fmt.Sprintf("restarting cluster %v", clusterName), func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) clusterRestarted := cluster.DeepCopy() if clusterRestarted.Annotations == nil { @@ -3029,14 +2865,14 @@ func AssertClusterRollingRestart(namespace, clusterName string) { }) AssertClusterEventuallyReachesPhase(namespace, clusterName, []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) } // AssertPVCCount matches count and pvc List. 
func AssertPVCCount(namespace, clusterName string, pvcCount, timeout int) { By(fmt.Sprintf("verify cluster %v healthy pvc list", clusterName), func() { Eventually(func(g Gomega) { - cluster, _ := env.GetCluster(namespace, clusterName) + cluster, _ := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(cluster.Status.PVCCount).To(BeEquivalentTo(pvcCount)) pvcList := &corev1.PersistentVolumeClaimList{} @@ -3071,7 +2907,7 @@ func AssertClusterEventuallyReachesPhase(namespace, clusterName string, phase [] // assertPredicateClusterHasPhase returns true if the Cluster's phase is contained in a given slice of phases func assertPredicateClusterHasPhase(namespace, clusterName string, phase []string) func(g Gomega) { return func(g Gomega) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) g.Expect(slices.Contains(phase, cluster.Status.Phase)).To(BeTrue()) } diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go new file mode 100644 index 0000000000..e6eaea7e24 --- /dev/null +++ b/tests/e2e/backup_restore_azure_test.go @@ -0,0 +1,531 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "fmt" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), func() { + const ( + tableName = "to_restore" + ) + AzureConfiguration := backups.NewAzureConfigurationFromEnv() + + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsAKS() { + Skip("This test is only run on AKS clusters") + } + }) + + Context("using azure blobs as object storage with storage account access authentication", Ordered, func() { + // We must be careful here. 
All the clusters use the same remote storage + // and that means that we must use different cluster names otherwise + // we risk mixing WALs and backups + const azureBlobSampleFile = fixturesDir + "/backup/azure_blob/cluster-with-backup-azure-blob.yaml.template" + const clusterRestoreSampleFile = fixturesDir + "/backup/azure_blob/cluster-from-restore.yaml.template" + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azure-blob.yaml" + backupFile := fixturesDir + "/backup/azure_blob/backup-azure-blob.yaml" + var namespace, clusterName string + + BeforeAll(func() { + const namespacePrefix = "cluster-backup-azure-blob" + var err error + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azureBlobSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests. + // The credentials are retrieved from the environment variables, as we can't create + // a fixture for them + By("creating the Azure Blob Storage credentials", func() { + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + AzureConfiguration.StorageAccount, + AzureConfiguration.StorageKey, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, azureBlobSampleFile, env) + }) + + // We back up and restore a cluster, and verify some expected data to + // be there + It("backs up and restore a cluster", func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration) + By("uploading a backup", func() { + // We create a backup + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupFile, false, + testTimeouts[testUtils.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) + + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func() (string, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck + }, 30).ShouldNot(BeEmpty()) + }) + + // Restore backup in a new cluster + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) + + By("deleting the restored cluster", func() { + err := DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // Create a scheduled backup with the 'immediate' option enabled. 
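+	// With 'immediate: true' the operator requests the first backup as soon as
+	// the ScheduledBackup is reconciled, rather than waiting for the first cron
+	// occurrence.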
We expect the backup to be available + It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) + + // Only one data.tar files should be present + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, + clusterName, "data.tar") + }, 30).Should(BeNumerically("==", 2)) + }) + + It("backs up and restore a cluster with PITR", func() { + restoredClusterName := "restore-cluster-azure-pitr" + currentTimestamp := new(string) + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + backupFile, + AzureConfiguration, + 2, + currentTimestamp, + ) + + assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration) + + cluster, err := backups.CreateClusterFromBackupUsingPITR( + env.Ctx, + env.Client, + env.Scheme, + namespace, + restoredClusterName, + backupFile, + *currentTimestamp, + ) + Expect(err).ToNot(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") + By("deleting the restored cluster", func() { + Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed()) + }) + }) + + // We create a cluster, create a scheduled backup, patch it to suspend its + // execution. We verify that the number of backups does not increase. + // We then patch it again back to its initial state and verify that + // the amount of backups keeps increasing again + It("verifies that scheduled backups can be suspended", func() { + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azure-blob.yaml" + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + By("scheduling backups", func() { + AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 480) + + // AssertScheduledBackupsImmediate creates at least two backups, we should find + // their base backups + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, + clusterName, "data.tar") + }, 60).Should(BeNumerically(">=", 2)) + }) + AssertSuspendScheduleBackups(namespace, scheduledBackupName) + }) + }) +}) + +var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() { + const ( + fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" + sourceBackupFileAzure = fixturesBackupDir + "backup-azure-blob-02.yaml" + clusterSourceFileAzure = fixturesBackupDir + "source-cluster-azure-blob-01.yaml.template" + externalClusterFileAzure = fixturesBackupDir + "external-clusters-azure-blob-03.yaml.template" + sourceBackupFileAzurePITR = fixturesBackupDir + "backup-azure-blob-pitr.yaml" + tableName = "to_restore" + clusterSourceFileAzureSAS = fixturesBackupDir + "cluster-with-backup-azure-blob-sas.yaml.template" + clusterRestoreFileAzureSAS = fixturesBackupDir + "cluster-from-restore-sas.yaml.template" + sourceBackupFileAzureSAS = fixturesBackupDir + "backup-azure-blob-sas.yaml" + sourceBackupFileAzurePITRSAS = fixturesBackupDir + "backup-azure-blob-pitr-sas.yaml" + 
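+		// 'level' gates this spec: it only runs when the configured test depth
+		// is at least tests.High (checked in the BeforeEach below).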
level = tests.High + ) + + currentTimestamp := new(string) + AzureConfiguration := backups.NewAzureConfigurationFromEnv() + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsAKS() { + Skip("This test is only executed on AKS clusters") + } + }) + + // Restore cluster using a recovery object store, that is a backup of another cluster, + // created by Barman Cloud, and defined via the barmanObjectStore option in the externalClusters section + + Context("using azure blobs as object storage", func() { + Context("storage account access authentication", Ordered, func() { + var namespace, clusterName string + BeforeAll(func() { + const namespacePrefix = "recovery-barman-object-azure" + var err error + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileAzure) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests. + // The credentials are retrieved from the environment variables, as we can't create + // a fixture for them + By("creating the Azure Blob Storage credentials", func() { + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + AzureConfiguration.StorageAccount, + AzureConfiguration.StorageKey, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, clusterSourceFileAzure, env) + }) + + It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", + func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration) + + By("backing up a cluster and verifying it exists on azure blob storage", func() { + // Create the backup + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, sourceBackupFileAzure, false, + testTimeouts[testUtils.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + }) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterRestore(namespace, externalClusterFileAzure, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using "+ + "'barmanObjectStore' option in 'externalClusters' section", func() { + externalClusterName := "external-cluster-azure-pitr" + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + sourceBackupFileAzurePITR, + AzureConfiguration, + 2, + currentTimestamp, + ) + + restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzure( + env.Ctx, + env.Client, + namespace, + externalClusterName, + clusterName, + *currentTimestamp, + "backup-storage-creds", + AzureConfiguration.StorageAccount, + AzureConfiguration.BlobContainer, + ) + 
Expect(err).ToNot(HaveOccurred()) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed()) + }) + }) + }) + + Context("storage account SAS Token authentication", Ordered, func() { + var namespace, clusterName string + BeforeAll(func() { + if !IsAKS() { + Skip("This test is only executed on AKS clusters") + } + const namespacePrefix = "cluster-backup-azure-blob-sas" + var err error + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileAzureSAS) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests, + // we get the credentials from the environment variables as we can't create + // a fixture for them + By("creating the Azure Blob Container SAS Token credentials", func() { + err = backups.CreateSASTokenCredentials( + env.Ctx, + env.Client, + namespace, + AzureConfiguration.StorageAccount, + AzureConfiguration.StorageKey, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the Cluster + AssertCreateCluster(namespace, clusterName, clusterSourceFileAzureSAS, env) + }) + + It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", + func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + // Create a WAL on the primary and check if it arrives in the + // Azure Blob Storage within a short time + assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration) + + By("backing up a cluster and verifying it exists on azure blob storage", func() { + // We create a Backup + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, sourceBackupFileAzureSAS, false, + testTimeouts[testUtils.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + }) + + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using "+ + "'barmanObjectStore' option in 'externalClusters' section", func() { + externalClusterName := "external-cluster-azure-pitr" + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + sourceBackupFileAzurePITRSAS, + AzureConfiguration, + 2, + currentTimestamp, + ) + + restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzure( + env.Ctx, + env.Client, + namespace, + externalClusterName, + clusterName, + *currentTimestamp, + "backup-storage-creds-sas", + AzureConfiguration.StorageAccount, + AzureConfiguration.BlobContainer, + ) + Expect(err).ToNot(HaveOccurred()) + + // Restoring cluster using a recovery 
barman object store, which is defined + // in the externalClusters section + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed()) + }) + }) + }) + }) +}) + +func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration backups.AzureConfiguration) { + // Create a WAL on the primary and check if it arrives at the Azure Blob Storage, within a short time + By("archiving WALs and verifying they exist", func() { + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + latestWAL := switchWalAndGetLatestArchive(primary.Namespace, primary.Name) + // Define what file we are looking for in Azure. + // Escapes are required since az expects forward slashes to be escaped + path := fmt.Sprintf("wals\\/0000000100000000\\/%v.gz", latestWAL) + // Verifying on blob storage using az + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(configuration, clusterName, path) + }, 60).Should(BeEquivalentTo(1)) + }) +} + +func prepareClusterForPITROnAzureBlob( + namespace string, + clusterName string, + backupSampleFile string, + azureConfig backups.AzureConfiguration, + expectedVal int, + currentTimestamp *string, +) { + const tableNamePitr = "for_restore" + By("backing up a cluster and verifying it exists on Azure Blob", func() { + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupSampleFile, false, + testTimeouts[testUtils.BackupIsReady], + ) + + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") + }, 30).Should(BeEquivalentTo(expectedVal)) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) //nolint:staticcheck + }, 30).Should(Succeed()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableNamePitr, + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable(tableNamePitr, 3, conn) + }) + assertArchiveWalOnAzureBlob(namespace, clusterName, azureConfig) + AssertArchiveConditionMet(namespace, clusterName, "5m") + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) +} diff --git a/tests/e2e/backup_restore_azurite_test.go b/tests/e2e/backup_restore_azurite_test.go new file mode 100644 index 0000000000..645ef88ac8 --- /dev/null +++ b/tests/e2e/backup_restore_azurite_test.go @@ -0,0 +1,383 @@ +/* +Copyright © contributors to CloudNativePG, established as 
+CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "fmt" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore), func() { + const ( + tableName = "to_restore" + azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" + ) + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + + if !(IsLocal() || IsGKE() || IsOpenshift()) { + Skip("This test is only executed on gke, openshift and local") + } + }) + + Context("using Azurite blobs as object storage", Ordered, func() { + // This is a set of tests using an Azurite server deployed in the same + // namespace as the cluster. 
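+		// (Azurite emulates the Azure Blob Storage API inside the cluster, so
+		// these specs need no real Azure account.)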
Since each cluster is installed in its + // own namespace, they can share the configuration file + const ( + clusterRestoreSampleFile = fixturesDir + "/backup/azurite/cluster-from-restore.yaml.template" + scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azurite.yaml" + scheduledBackupImmediateSampleFile = fixturesDir + + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azurite.yaml" + backupFile = fixturesDir + "/backup/azurite/backup.yaml" + azuriteCaSecName = "azurite-ca-secret" + azuriteTLSSecName = "azurite-tls-secret" + ) + var namespace, clusterName string + + BeforeAll(func() { + const namespacePrefix = "cluster-backup-azurite" + var err error + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azuriteBlobSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // Create and assert ca and tls certificate secrets on Azurite + By("creating ca and tls certificate secrets", func() { + err := backups.CreateCertificateSecretsOnAzurite( + env.Ctx, env.Client, + namespace, clusterName, + azuriteCaSecName, azuriteTLSSecName, + ) + Expect(err).ToNot(HaveOccurred()) + }) + // Setup Azurite and az cli along with Postgresql cluster + prepareClusterBackupOnAzurite(namespace, clusterName, azuriteBlobSampleFile, backupFile, tableName) + }) + + It("restores a backed up cluster", func() { + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreSampleFile, tableName) + }) + + // Create a scheduled backup with the 'immediate' option enabled. + // We expect the backup to be available + It("immediately starts a backup using ScheduledBackups immediate option", func() { + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupImmediateSampleFile) + Expect(err).ToNot(HaveOccurred()) + + AssertScheduledBackupsImmediate(namespace, scheduledBackupImmediateSampleFile, scheduledBackupName) + + // AssertScheduledBackupsImmediate creates at least two backups, we should find + // their base backups + Eventually(func() (int, error) { + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 30).Should(BeNumerically("==", 2)) + }) + + It("backs up and restore a cluster with PITR Azurite", func() { + const ( + restoredClusterName = "restore-cluster-pitr-azurite" + backupFilePITR = fixturesDir + "/backup/azurite/backup-pitr.yaml" + ) + currentTimestamp := new(string) + + prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp) + + cluster, err := backups.CreateClusterFromBackupUsingPITR( + env.Ctx, + env.Client, + env.Scheme, + namespace, + restoredClusterName, + backupFilePITR, + *currentTimestamp, + ) + Expect(err).NotTo(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") + + By("deleting the restored cluster", func() { + Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed()) + }) + }) + + // We create a cluster, create a scheduled backup, patch it to suspend its + // execution. We verify that the number of backups does not increase. 
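+	// (Suspension here means setting 'spec.suspend: true' on the ScheduledBackup
+	// resource.)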
+ // We then patch it again back to its initial state and verify that + // the amount of backups keeps increasing again + It("verifies that scheduled backups can be suspended", func() { + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + By("scheduling backups", func() { + AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) + Eventually(func() (int, error) { + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 60).Should(BeNumerically(">=", 3)) + }) + + AssertSuspendScheduleBackups(namespace, scheduledBackupName) + }) + }) +}) + +var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() { + const ( + fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" + azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" + backupFileAzurite = fixturesBackupDir + "backup-azurite-02.yaml" + externalClusterFileAzurite = fixturesBackupDir + "external-clusters-azurite-03.yaml.template" + + azuriteCaSecName = "azurite-ca-secret" + azuriteTLSSecName = "azurite-tls-secret" + tableName = "to_restore" + ) + Context("using Azurite blobs as object storage", Ordered, func() { + var namespace, clusterName string + BeforeAll(func() { + if IsAKS() { + Skip("This test is not run on AKS") + } + const namespacePrefix = "recovery-barman-object-azurite" + var err error + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azuriteBlobSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // Create and assert ca and tls certificate secrets on Azurite + By("creating ca and tls certificate secrets", func() { + err := backups.CreateCertificateSecretsOnAzurite( + env.Ctx, + env.Client, + namespace, + clusterName, + azuriteCaSecName, + azuriteTLSSecName, + ) + Expect(err).ToNot(HaveOccurred()) + }) + // Setup Azurite and az cli along with PostgreSQL cluster + prepareClusterBackupOnAzurite( + namespace, + clusterName, + azuriteBlobSampleFile, + backupFileAzurite, + tableName, + ) + }) + + It("restore cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, externalClusterFileAzurite, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ + " option in 'externalClusters' section", func() { + const ( + externalClusterRestoreName = "restore-external-cluster-pitr" + backupFileAzuritePITR = fixturesBackupDir + "backup-azurite-pitr.yaml" + ) + currentTimestamp := new(string) + prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp) + + // Create a cluster from a particular time using external backup. 
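+			// The helper below is expected to assemble a recovery bootstrap
+			// roughly of this shape (a sketch; the exact spec is built inside
+			// the test utils):
+			//
+			//	bootstrap:
+			//	  recovery:
+			//	    source: <external cluster name>
+			//	    recoveryTarget:
+			//	      targetTime: <currentTimestamp>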
+ restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzurite( + env.Ctx, env.Client, + namespace, externalClusterRestoreName, clusterName, *currentTimestamp) + Expect(err).NotTo(HaveOccurred()) + + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterRestoreName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed()) + }) + }) + }) +}) + +func prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile string) { + By("creating the Azurite storage credentials", func() { + err := backups.CreateStorageCredentialsOnAzurite(env.Ctx, env.Client, namespace) + Expect(err).ToNot(HaveOccurred()) + }) + + By("setting up Azurite to hold the backups", func() { + // Deploying azurite for blob storage + err := backups.InstallAzurite(env.Ctx, env.Client, namespace) + Expect(err).ToNot(HaveOccurred()) + }) + + By("setting up az-cli", func() { + // This is required as we have a service of Azurite running locally. + // In order to connect, we need az cli inside the namespace + err := backups.InstallAzCli(env.Ctx, env.Client, namespace) + Expect(err).ToNot(HaveOccurred()) + }) + + // Creating cluster + AssertCreateCluster(namespace, clusterName, clusterSampleFile, env) + + AssertArchiveConditionMet(namespace, clusterName, "5m") +} + +func prepareClusterBackupOnAzurite( + namespace, + clusterName, + clusterSampleFile, + backupFile, + tableName string, +) { + // Setting up Azurite and az cli along with Postgresql cluster + prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile) + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzurite(namespace, clusterName) + + By("backing up a cluster and verifying it exists on azurite", func() { + // We create a Backup + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupFile, false, + testTimeouts[testUtils.BackupIsReady], + ) + // Verifying file called data.tar should be available on Azurite blob storage + Eventually(func() (int, error) { + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) //nolint:staticcheck + }, 30).Should(Succeed()) + }) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) +} + +func prepareClusterForPITROnAzurite( + namespace, + clusterName, + backupSampleFile string, + currentTimestamp *string, +) { + By("backing up a cluster and verifying it exists on azurite", func() { + // We create a Backup + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupSampleFile, false, + testTimeouts[testUtils.BackupIsReady], + ) + // Verifying file called data.tar should be available on Azurite blob storage + Eventually(func() (int, error) { + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + 
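+		// FirstRecoverabilityPoint is only populated once a base backup has
+		// completed, so polling until it is non-empty confirms the backup landed.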
g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) //nolint:staticcheck + }, 30).Should(Succeed()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "for_restore", + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() { + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable("for_restore", 3, conn) + }) + assertArchiveWalOnAzurite(namespace, clusterName) +} + +func assertArchiveWalOnAzurite(namespace, clusterName string) { + // Create a WAL on the primary and check if it arrives at the Azure Blob Storage within a short time + By("archiving WALs and verifying they exist", func() { + primary := clusterName + "-1" + latestWAL := switchWalAndGetLatestArchive(namespace, primary) + // verifying on blob storage using az + // Define what file we are looking for in Azurite. + // Escapes are required since az expects forward slashes to be escaped + path := fmt.Sprintf("%v\\/wals\\/0000000100000000\\/%v.gz", clusterName, latestWAL) + // verifying on blob storage using az + Eventually(func() (int, error) { + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path) + }, 60).Should(BeEquivalentTo(1)) + }) +} diff --git a/tests/e2e/backup_restore_minio_test.go b/tests/e2e/backup_restore_minio_test.go new file mode 100644 index 0000000000..bf56b9a6ff --- /dev/null +++ b/tests/e2e/backup_restore_minio_test.go @@ -0,0 +1,852 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "fmt" + "path/filepath" + + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), func() { + const ( + tableName = "to_restore" + barmanCloudBackupLogEntry = "Starting barman-cloud-backup" + ) + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + Context("using minio as object storage for backup", Ordered, func() { + // This is a set of tests using a minio server deployed in the same + // namespace as the cluster. Since each cluster is installed in its + // own namespace, they can share the configuration file + var namespace, clusterName string + const ( + backupFile = fixturesDir + "/backup/minio/backup-minio.yaml" + customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml" + ) + + clusterWithMinioSampleFile := fixturesDir + "/backup/minio/cluster-with-backup-minio.yaml.template" + + BeforeAll(func() { + if !IsLocal() { + Skip("This test is only run on local clusters") + } + const namespacePrefix = "cluster-backup-minio" + var err error + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + By("create the certificates for MinIO", func() { + err := minioEnv.CreateCaSecret(env, namespace) + Expect(err).ToNot(HaveOccurred()) + }) + + By("creating the credentials for minio", func() { + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create ConfigMap and secrets to verify metrics for target database after backup restore + AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 1, 1) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) + + By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() (bool, error) { + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( + namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + if err != nil { + return false, err + } + return connectionStatus, nil + }, 60).Should(BeTrue()) + }) + }) + + // We back up and restore a cluster, and verify some 
expected data to + // be there + It("backs up and restores a cluster using minio", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore.yaml.template" + ) + var backup *apiv1.Backup + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupFile) + Expect(err).ToNot(HaveOccurred()) + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + latestTar := minio.GetFilePath(clusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster and verifying it exists on minio, backup path is %v", latestTar), + func() { + backup = backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, backupFile, false, + testTimeouts[timeouts.BackupIsReady]) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck + }, 30).ShouldNot(BeEmpty()) + Eventually(func() (string, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.LastSuccessfulBackup, err //nolint:staticcheck + }, 30).ShouldNot(BeEmpty()) + Eventually(func() (string, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.LastFailedBackup, err //nolint:staticcheck + }, 30).Should(BeEmpty()) + }) + + By("verifying the backup is using the expected barman-cloud-backup options", func() { + Expect(backup).ToNot(BeNil()) + Expect(backup.Status.InstanceID).ToNot(BeNil()) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, namespace, + backup.Status.InstanceID.PodName, + ) + Expect(err).ToNot(HaveOccurred()) + expectedBaseBackupOptions := []string{ + "--immediate-checkpoint", + "--min-chunk-size=5MB", + "--read-timeout=59", + } + result, err := logs.CheckOptionsForBarmanCommand( + logEntries, + barmanCloudBackupLogEntry, + backup.Name, + backup.Status.InstanceID.PodName, + expectedBaseBackupOptions, + ) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(BeTrue()) + }) + + By("executing a second backup and verifying the number of backups on minio", func() { + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + + // delete the first backup and create a second backup + backup := &apiv1.Backup{} + err := 
env.Client.Get(env.Ctx, + ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, + backup) + Expect(err).ToNot(HaveOccurred()) + err = env.Client.Delete(env.Ctx, backup) + Expect(err).ToNot(HaveOccurred()) + // create a second backup + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupFile, false, + testTimeouts[timeouts.BackupIsReady], + ) + latestTar = minio.GetFilePath(clusterName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(2)) + }) + + By("verifying the backupName is properly set in the status of the backup", func() { + backup := &apiv1.Backup{} + err := env.Client.Get(env.Ctx, + ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, + backup) + Expect(err).ToNot(HaveOccurred()) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + // We know that our current images always contain the latest barman version + if cluster.ShouldForceLegacyBackup() { + Expect(backup.Status.BackupName).To(BeEmpty()) + } else { + Expect(backup.Status.BackupName).To(HavePrefix("backup-")) + } + }) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) + + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName) + Expect(err).ToNot(HaveOccurred()) + AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) + + previous := 0 + latestGZ := filepath.Join("*", clusterName, "*", "*.history.gz") + By(fmt.Sprintf("checking the previous number of .history files in minio, history file name is %v", + latestGZ), func() { + previous, err = minio.CountFiles(minioEnv, latestGZ) + Expect(err).ToNot(HaveOccurred()) + }) + + AssertSwitchover(namespace, clusterName, env) + + By("checking the number of .history after switchover", func() { + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestGZ) + }, 60).Should(BeNumerically(">", previous)) + }) + + By("deleting the restored cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // We backup and restore a cluster from a standby, and verify some expected data to + // be there + It("backs up and restore a cluster from standby", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterWithMinioStandbySampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-standby.yaml.template" + backupStandbyFile = fixturesDir + "/backup/minio/backup-minio-standby.yaml" + ) + + targetClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioStandbySampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, targetClusterName, clusterWithMinioStandbySampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: targetClusterName, + DatabaseName: 
postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) + latestTar := minio.GetFilePath(targetClusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster from standby and verifying it exists on minio, backup path is %v", + latestTar), func() { + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupStandbyFile, true, + testTimeouts[timeouts.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, targetClusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, targetClusterName) + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck + }, 30).ShouldNot(BeEmpty()) + }) + }) + + // We backup and restore a cluster from a standby, and verify some expected data to + // be there + It("backs up a cluster from standby with backup target defined in backup", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterWithMinioSampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-primary.yaml.template" + backupWithTargetFile = fixturesDir + "/backup/minio/backup-minio-override-target.yaml" + ) + + targetClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, targetClusterName, clusterWithMinioSampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: targetClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) + latestTar := minio.GetFilePath(targetClusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster from standby (defined in backup file) and verifying it exists on minio,"+ + " backup path is %v", latestTar), func() { + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupWithTargetFile, true, + testTimeouts[timeouts.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, targetClusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, targetClusterName) + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck + }, 30).ShouldNot(BeEmpty()) + }) + + By("deleting the cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // Test that the restore works if the source cluster has a custom + // 
backup.barmanObjectStore.serverName that is different from the cluster name + It("backs up and restores a cluster with custom backup serverName", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore-custom.yaml.template" + // clusterWithMinioCustomSampleFile has metadata.name != backup.barmanObjectStore.serverName + clusterWithMinioCustomSampleFile = fixturesDir + + "/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template" + backupFileCustom = fixturesDir + "/backup/minio/backup-minio-custom-servername.yaml" + clusterServerName = "pg-backup-minio-Custom-Name" + ) + + customClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioCustomSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, customClusterName, clusterWithMinioCustomSampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: customClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, customClusterName, clusterServerName) + + // There should be a backup resource and + By("backing up a cluster and verifying it exists on minio", func() { + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupFileCustom, false, + testTimeouts[timeouts.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, customClusterName) + latestBaseTar := minio.GetFilePath(clusterServerName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestBaseTar) + }, 60).Should(BeEquivalentTo(1), + fmt.Sprintf("verify the number of backup %v is equals to 1", latestBaseTar)) + // this is the second backup we take on the bucket + Eventually(func() (string, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, customClusterName) + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck + }, 30).ShouldNot(BeEmpty()) + }) + + // Restore backup in a new cluster + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) + + By("deleting the primary cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterWithMinioCustomSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + + By("deleting the restored cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // Create a scheduled backup with the 'immediate' option enabled. 
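+	// The cron expression still drives subsequent backups; 'immediate' only
+	// anticipates the first one.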
We expect the backup to be available + It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-minio.yaml" + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) + latestBaseTar := minio.GetFilePath(clusterName, "data.tar") + // AssertScheduledBackupsImmediate creates at least two backups, we should find + // their base backups + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestBaseTar) + }, 60).Should(BeNumerically(">=", 2), + fmt.Sprintf("verify the number of backup %v is >= 2", latestBaseTar)) + }) + + It("backs up and restore a cluster with PITR MinIO", func() { + const ( + restoredClusterName = "restore-cluster-pitr-minio" + backupFilePITR = fixturesDir + "/backup/minio/backup-minio-pitr.yaml" + ) + currentTimestamp := new(string) + prepareClusterForPITROnMinio( + namespace, + clusterName, + backupFilePITR, + 3, + currentTimestamp, + ) + + cluster, err := backups.CreateClusterFromBackupUsingPITR( + env.Ctx, + env.Client, + env.Scheme, + namespace, + restoredClusterName, + backupFilePITR, + *currentTimestamp, + ) + Expect(err).NotTo(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReady], env) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003") + + By("deleting the restored cluster", func() { + Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed()) + }) + }) + + // We create a cluster and a scheduled backup, then it is patched to suspend its + // execution. We verify that the number of backups does not increase. 
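+	// (The suspend/resume cycle is driven by AssertSuspendScheduleBackups below,
+	// toggling 'spec.suspend' on the ScheduledBackup.)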
+	// We then patch it again back to its initial state and verify that
+	// the amount of backups keeps increasing again
+	It("verifies that scheduled backups can be suspended", func() {
+		const scheduledBackupSampleFile = fixturesDir +
+			"/backup/scheduled_backup_suspend/scheduled-backup-suspend-minio.yaml"
+		scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile)
+		Expect(err).ToNot(HaveOccurred())
+
+		By("scheduling backups", func() {
+			AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300)
+			latestTar := minio.GetFilePath(clusterName, "data.tar")
+			Eventually(func() (int, error) {
+				return minio.CountFiles(minioEnv, latestTar)
+			}, 60).Should(BeNumerically(">=", 2),
+				fmt.Sprintf("verify the number of backups %v is greater than or equal to 2", latestTar))
+		})
+
+		AssertSuspendScheduleBackups(namespace, scheduledBackupName)
+	})
+
+	It("verifies tags in backed-up files", func() {
+		AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
+		tags, err := minio.GetFileTags(minioEnv, minio.GetFilePath(clusterName, "*1.gz"))
+		Expect(err).ToNot(HaveOccurred())
+		Expect(tags.Tags).ToNot(BeEmpty())
+
+		currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
+		Expect(err).ToNot(HaveOccurred())
+		oldPrimary := currentPrimary.GetName()
+		// Force-delete the primary
+		quickDelete := &ctrlclient.DeleteOptions{
+			GracePeriodSeconds: &quickDeletionPeriod,
+		}
+		err = pods.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete)
+		Expect(err).ToNot(HaveOccurred())
+
+		AssertNewPrimary(namespace, clusterName, oldPrimary)
+
+		tags, err = minio.GetFileTags(minioEnv, minio.GetFilePath(clusterName, "*.history.gz"))
+		Expect(err).ToNot(HaveOccurred())
+		Expect(tags.Tags).ToNot(BeEmpty())
+	})
+	})
+})
+
+var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tests.LabelBackupRestore), func() {
+	const (
+		fixturesBackupDir               = fixturesDir + "/backup/recovery_external_clusters/"
+		externalClusterFileMinioReplica = fixturesBackupDir + "external-clusters-minio-replica-04.yaml.template"
+		clusterSourceFileMinio          = fixturesBackupDir + "source-cluster-minio-01.yaml.template"
+		externalClusterFileMinio        = fixturesBackupDir + "external-clusters-minio-03.yaml.template"
+		sourceTakeFirstBackupFileMinio  = fixturesBackupDir + "backup-minio-02.yaml"
+		sourceTakeSecondBackupFileMinio = fixturesBackupDir + "backup-minio-03.yaml"
+		sourceTakeThirdBackupFileMinio  = fixturesBackupDir + "backup-minio-04.yaml"
+		tableName                       = "to_restore"
+	)
+	BeforeEach(func() {
+		if testLevelEnv.Depth < int(tests.High) {
+			Skip("Test depth is lower than the amount requested for this test")
+		}
+	})
+
+	// Restore a cluster using a recovery object store, that is, a backup of another cluster
+	// created by Barman Cloud and defined via the barmanObjectStore option in the externalClusters section
+	Context("using minio as object storage", Ordered, func() {
+		var namespace, clusterName string
+
+		BeforeAll(func() {
+			if !IsLocal() {
+				Skip("This test is only executed on local clusters")
+			}
+			const namespacePrefix = "recovery-barman-object-minio"
+			var err error
+			clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileMinio)
+			Expect(err).ToNot(HaveOccurred())
+			// Create a cluster in a namespace we'll delete after the test
+			namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("creating the credentials for minio", func() {
+				_, err = secrets.CreateObjectStorageSecret(
+					env.Ctx,
+					env.Client,
+					namespace,
+					"backup-storage-creds",
+					"minio",
+					"minio123",
+				)
+				Expect(err).ToNot(HaveOccurred())
+			})
+
+			By("creating the certificates for MinIO", func() {
+				err := minioEnv.CreateCaSecret(env, namespace)
+				Expect(err).ToNot(HaveOccurred())
+			})
+
+			// Create the cluster
+			AssertCreateCluster(namespace, clusterName, clusterSourceFileMinio, env)
+
+			By("verifying connectivity to minio using the barman-cloud-wal-archive script", func() {
+				primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
+				Expect(err).ToNot(HaveOccurred())
+				Eventually(func() (bool, error) {
+					connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive(
+						namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName)
+					if err != nil {
+						return false, err
+					}
+					return connectionStatus, nil
+				}, 60).Should(BeTrue())
+			})
+		})
+
+		It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section",
+			func() {
+				externalClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, externalClusterFileMinio)
+				Expect(err).ToNot(HaveOccurred())
+
+				// Write a table and some data on the "app" database
+				tableLocator := TableLocator{
+					Namespace:    namespace,
+					ClusterName:  clusterName,
+					DatabaseName: postgres.AppDBName,
+					TableName:    tableName,
+				}
+				AssertCreateTestData(env, tableLocator)
+
+				AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
+
+				// There should be a backup resource, and its base backup should be visible on minio
+				By("backing up a cluster and verifying it exists on minio", func() {
+					backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeFirstBackupFileMinio,
+						false,
+						testTimeouts[timeouts.BackupIsReady])
+					backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
+
+					// TODO: this is to force a CHECKPOINT when we run the backup on standby.
+					// This should be better handled inside Execute
+					AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
+
+					latestTar := minio.GetFilePath(clusterName, "data.tar")
+					Eventually(func() (int, error) {
+						return minio.CountFiles(minioEnv, latestTar)
+					}, 60).Should(BeEquivalentTo(1),
+						fmt.Sprintf("verify the number of backups %v is equal to 1", latestTar))
+					Eventually(func() (string, error) {
+						cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
+						if err != nil {
+							return "", err
+						}
+						return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck
+					}, 30).ShouldNot(BeEmpty())
+				})
+
+				// Restoring cluster using a recovery barman object store, which is defined
+				// in the externalClusters section
+				AssertClusterRestore(namespace, externalClusterFileMinio, tableName)
+
+				// verify test data on restored external cluster
+				tableLocator = TableLocator{
+					Namespace:    namespace,
+					ClusterName:  externalClusterName,
+					DatabaseName: postgres.AppDBName,
+					TableName:    tableName,
+				}
+				AssertDataExpectedCount(env, tableLocator, 2)
+
+				By("deleting the restored cluster", func() {
+					err = DeleteResourcesFromFile(namespace, externalClusterFileMinio)
+					Expect(err).ToNot(HaveOccurred())
+				})
+			})
+
+		It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+
+			"option in 'externalClusters' section", func() {
+			externalClusterRestoreName := "restore-external-cluster-pitr"
+
+			currentTimestamp := new(string)
+			// We have already written 2 rows into the test table 'to_restore' in the test above;
+			// now we take the current timestamp, which will be used to restore the cluster
+			// from the source using PITR
+			By("getting currentTimestamp", func() {
+				ts, err := postgres.GetCurrentTimestamp(
+					env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+					namespace, clusterName,
+				)
+				*currentTimestamp = ts
+				Expect(err).ToNot(HaveOccurred())
+			})
+			By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() {
+				forward, conn, err := postgres.ForwardPSQLConnection(
+					env.Ctx,
+					env.Client,
+					env.Interface,
+					env.RestClientConfig,
+					namespace,
+					clusterName,
+					postgres.AppDBName,
+					apiv1.ApplicationUserSecretSuffix,
+				)
+				// Check the error before deferring the close calls, so that
+				// conn and forward are guaranteed to be non-nil here
+				Expect(err).ToNot(HaveOccurred())
+				defer func() {
+					_ = conn.Close()
+					forward.Close()
+				}()
+				// insert 2 more rows (entries 3 and 4) into the "app" database
+				insertRecordIntoTable(tableName, 3, conn)
+				insertRecordIntoTable(tableName, 4, conn)
+			})
+			By("creating a second backup and verifying it exists on minio", func() {
+				backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeSecondBackupFileMinio,
+					false,
+					testTimeouts[timeouts.BackupIsReady])
+				backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
+				latestTar := minio.GetFilePath(clusterName, "data.tar")
+				Eventually(func() (int, error) {
+					return minio.CountFiles(minioEnv, latestTar)
+				}, 60).Should(BeEquivalentTo(2),
+					fmt.Sprintf("verify the number of backups %v is equal to 2", latestTar))
+			})
+			var restoredCluster *apiv1.Cluster
+			By("creating a cluster from the backup with PITR", func() {
+				var err error
+				restoredCluster, err = backups.CreateClusterFromExternalClusterBackupWithPITROnMinio(
+					env.Ctx, env.Client,
+					namespace, externalClusterRestoreName, clusterName, *currentTimestamp)
+				Expect(err).NotTo(HaveOccurred())
+			})
+			AssertClusterWasRestoredWithPITRAndApplicationDB(
+				namespace,
+				externalClusterRestoreName,
+				tableName,
+				"00000002",
+			)
+			By("deleting the restored cluster", func() {
+				Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed())
+			})
+		})
+
+		It("restores a cluster from barman object using the replica option in the spec", func() {
+			// Write a table and some data on the "app" database
+			tableLocator := TableLocator{
+				Namespace:    namespace,
+				ClusterName:  clusterName,
+				DatabaseName: postgres.AppDBName,
+				TableName:    "for_restore_repl",
+			}
+			AssertCreateTestData(env, tableLocator)
+
+			AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
+
+			By("backing up a cluster and verifying it exists on minio", func() {
+				backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeThirdBackupFileMinio, false,
+					testTimeouts[timeouts.BackupIsReady])
+				backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
+				latestTar := minio.GetFilePath(clusterName, "data.tar")
+				Eventually(func() (int, error) {
+					return minio.CountFiles(minioEnv, latestTar)
+				}, 60).Should(BeEquivalentTo(3),
+					fmt.Sprintf("verify the number of backups %v is equal to 3", latestTar))
+			})
+
+			// Replicating a cluster with asynchronous replication
+			AssertClusterAsyncReplica(
+				namespace,
+				clusterSourceFileMinio,
+				externalClusterFileMinioReplica,
+				"for_restore_repl",
+			)
+		})
+	})
+})
+
+func prepareClusterForPITROnMinio(
+	namespace,
+	clusterName,
+	backupSampleFile string,
+	expectedVal int,
+	currentTimestamp *string,
+) {
+	const tableNamePitr = "for_restore"
+
+	By("backing up a cluster and verifying it exists on minio", func() {
+		backups.Execute(
+			env.Ctx, env.Client, env.Scheme,
+			namespace, backupSampleFile, false,
+			testTimeouts[timeouts.BackupIsReady],
+		)
+		latestTar
:= minio.GetFilePath(clusterName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeNumerically(">=", expectedVal), + fmt.Sprintf("verify the number of backups %v is greater than or equal to %v", latestTar, + expectedVal)) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) //nolint:staticcheck + }, 30).Should(Succeed()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableNamePitr, + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + insertRecordIntoTable(tableNamePitr, 3, conn) + }) + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + AssertArchiveConditionMet(namespace, clusterName, "5m") + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) +} diff --git a/tests/e2e/backup_restore_test.go b/tests/e2e/backup_restore_test.go deleted file mode 100644 index 05ae213875..0000000000 --- a/tests/e2e/backup_restore_test.go +++ /dev/null @@ -1,1163 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "fmt" - "path/filepath" - - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { - const ( - level = tests.High - - azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" - - tableName = "to_restore" - - barmanCloudBackupLogEntry = "Starting barman-cloud-backup" - ) - - currentTimestamp := new(string) - - BeforeEach(func() { - if testLevelEnv.Depth < int(level) { - Skip("Test depth is lower than the amount requested for this test") - } - }) - - Context("using minio as object storage for backup", Ordered, func() { - // This is a set of tests using a minio server deployed in the same - // namespace as the cluster. 
Since each cluster is installed in its - // own namespace, they can share the configuration file - var namespace, clusterName string - const ( - backupFile = fixturesDir + "/backup/minio/backup-minio.yaml" - customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml" - ) - - clusterWithMinioSampleFile := fixturesDir + "/backup/minio/cluster-with-backup-minio.yaml.template" - - BeforeAll(func() { - if !IsLocal() { - Skip("This test is only run on local clusters") - } - const namespacePrefix = "cluster-backup-minio" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - By("create the certificates for MinIO", func() { - err := minioEnv.CreateCaSecret(env, namespace) - Expect(err).ToNot(HaveOccurred()) - }) - - By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") - }) - - // Create ConfigMap and secrets to verify metrics for target database after backup restore - AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 1, 1) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) - - By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) - if err != nil { - return false, err - } - return connectionStatus, nil - }, 60).Should(BeTrue()) - }) - }) - - // We backup and restore a cluster, and verify some expected data to - // be there - It("backs up and restores a cluster using minio", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore.yaml.template" - ) - var backup *apiv1.Backup - restoredClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - backupName, err := env.GetResourceNameFromYAML(backupFile) - Expect(err).ToNot(HaveOccurred()) - // Create required test data - AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBSecret, testTableName, psqlClientPod) - - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - latestTar := minioPath(clusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster and verifying it exists on minio, backup path is %v", latestTar), func() { - backup = testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() 
(string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastSuccessfulBackup, err - }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastFailedBackup, err - }, 30).Should(BeEmpty()) - }) - - By("verifying the backup is using the expected barman-cloud-backup options", func() { - Expect(backup).ToNot(BeNil()) - Expect(backup.Status.InstanceID).ToNot(BeNil()) - logEntries, err := testUtils.ParseJSONLogs(namespace, backup.Status.InstanceID.PodName, env) - Expect(err).ToNot(HaveOccurred()) - expectedBaseBackupOptions := []string{ - "--immediate-checkpoint", - "--min-chunk-size=5MB", - "--read-timeout=59", - } - result, err := testUtils.CheckOptionsForBarmanCommand( - logEntries, - barmanCloudBackupLogEntry, - backup.Name, - backup.Status.InstanceID.PodName, - expectedBaseBackupOptions, - ) - Expect(err).ToNot(HaveOccurred()) - Expect(result).To(BeTrue()) - }) - - By("executing a second backup and verifying the number of backups on minio", func() { - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - - // delete the first backup and create a second backup - backup := &apiv1.Backup{} - err := env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, - backup) - Expect(err).ToNot(HaveOccurred()) - err = env.Client.Delete(env.Ctx, backup) - Expect(err).ToNot(HaveOccurred()) - // create a second backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - latestTar = minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(2)) - }) - - By("verifying the backupName is properly set in the status of the backup", func() { - backup := &apiv1.Backup{} - err := env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, - backup) - Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - // We know that our current images always contain the latest barman version - if cluster.ShouldForceLegacyBackup() { - Expect(backup.Status.BackupName).To(BeEmpty()) - } else { - Expect(backup.Status.BackupName).To(HavePrefix("backup-")) - } - }) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName, psqlClientPod) - - cluster, err := env.GetCluster(namespace, restoredClusterName) - Expect(err).ToNot(HaveOccurred()) - AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) - - previous := 0 - latestGZ := filepath.Join("*", clusterName, "*", "*.history.gz") - By(fmt.Sprintf("checking the previous number of .history files in minio, history file name is %v", - latestGZ), func() { - previous, err = testUtils.CountFilesOnMinio(minioEnv, latestGZ) - Expect(err).ToNot(HaveOccurred()) - }) - - AssertSwitchover(namespace, clusterName, env) - - By("checking the number of .history after switchover", func() { - 
Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestGZ) - }, 60).Should(BeNumerically(">", previous)) - }) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // We backup and restore a cluster from a standby, and verify some expected data to - // be there - It("backs up and restore a cluster from standby", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterWithMinioStandbySampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-standby.yaml.template" - backupStandbyFile = fixturesDir + "/backup/minio/backup-minio-standby.yaml" - ) - - targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioStandbySampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, targetClusterName, clusterWithMinioStandbySampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBSecret, testTableName, psqlClientPod) - - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, targetClusterName, tableName, psqlClientPod) - - AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) - latestTar := minioPath(targetClusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster from standby and verifying it exists on minio, backup path is %v", - latestTar), func() { - testUtils.ExecuteBackup(namespace, backupStandbyFile, true, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, targetClusterName) - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, targetClusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - }) - - // We backup and restore a cluster from a standby, and verify some expected data to - // be there - It("backs up a cluster from standby with backup target defined in backup", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterWithMinioSampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-primary.yaml.template" - backupWithTargetFile = fixturesDir + "/backup/minio/backup-minio-override-target.yaml" - ) - - targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, targetClusterName, clusterWithMinioSampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBSecret, testTableName, psqlClientPod) - - // Write 
a table and some data on the "app" database - AssertCreateTestData(namespace, targetClusterName, tableName, psqlClientPod) - - AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) - latestTar := minioPath(targetClusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster from standby (defined in backup file) and verifying it exists on minio,"+ - " backup path is %v", latestTar), func() { - testUtils.ExecuteBackup(namespace, backupWithTargetFile, true, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, targetClusterName) - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, targetClusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - By("deleting the cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Test that the restore works if the source cluster has a custom - // backup.barmanObjectStore.serverName that is different than the cluster name - It("backs up and restores a cluster with custom backup serverName", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore-custom.yaml.template" - // clusterWithMinioCustomSampleFile has metadata.name != backup.barmanObjectStore.serverName - clusterWithMinioCustomSampleFile = fixturesDir + - "/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template" - backupFileCustom = fixturesDir + "/backup/minio/backup-minio-custom-servername.yaml" - clusterServerName = "pg-backup-minio-Custom-Name" - ) - - customClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioCustomSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, customClusterName, clusterWithMinioCustomSampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(namespace, customClusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, customClusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, customClusterName, targetDBSecret, testTableName, psqlClientPod) - - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, customClusterName, tableName, psqlClientPod) - - AssertArchiveWalOnMinio(namespace, customClusterName, clusterServerName) - - // There should be a backup resource and - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, backupFileCustom, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, customClusterName) - latestBaseTar := minioPath(clusterServerName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestBaseTar) - }, 60).Should(BeEquivalentTo(1), - fmt.Sprintf("verify the number of backup %v is equals to 1", latestBaseTar)) - // this is the second backup we take on the bucket - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, customClusterName) - return 
cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restore backup in a new cluster - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName, psqlClientPod) - - By("deleting the primary cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterWithMinioCustomSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available - It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-minio.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) - latestBaseTar := minioPath(clusterName, "data.tar") - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestBaseTar) - }, 60).Should(BeNumerically(">=", 2), - fmt.Sprintf("verify the number of backup %v is >= 2", latestBaseTar)) - }) - - It("backs up and restore a cluster with PITR MinIO", func() { - const ( - restoredClusterName = "restore-cluster-pitr-minio" - backupFilePITR = fixturesDir + "/backup/minio/backup-minio-pitr.yaml" - ) - - prepareClusterForPITROnMinio( - namespace, - clusterName, - backupFilePITR, - 3, - currentTimestamp, - psqlClientPod, - ) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( - namespace, - restoredClusterName, - backupFilePITR, - *currentTimestamp, - env, - ) - Expect(err).NotTo(HaveOccurred()) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003", psqlClientPod) - - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster and a scheduled backup, then it is patched to suspend its - // execution. We verify that the number of backups does not increase. 
- // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-minio.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeNumerically(">=", 2), - fmt.Sprintf("verify the number of backup %v is great than 2", latestTar)) - }) - - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - - It("verify tags in backed files", func() { - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - tags, err := testUtils.GetFileTagsOnMinio(minioEnv, minioPath(clusterName, "*1.gz")) - Expect(err).ToNot(HaveOccurred()) - Expect(tags.Tags).ToNot(BeEmpty()) - - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - oldPrimary := currentPrimary.GetName() - // Force-delete the primary - quickDelete := &ctrlclient.DeleteOptions{ - GracePeriodSeconds: &quickDeletionPeriod, - } - err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) - Expect(err).ToNot(HaveOccurred()) - - AssertNewPrimary(namespace, clusterName, oldPrimary) - - tags, err = testUtils.GetFileTagsOnMinio(minioEnv, minioPath(clusterName, "*.history.gz")) - Expect(err).ToNot(HaveOccurred()) - Expect(tags.Tags).ToNot(BeEmpty()) - }) - }) - - Context("using azure blobs as object storage with storage account access authentication", Ordered, func() { - // We must be careful here. All the clusters use the same remote storage - // and that means that we must use different cluster names otherwise - // we risk mixing WALs and backups - const azureBlobSampleFile = fixturesDir + "/backup/azure_blob/cluster-with-backup-azure-blob.yaml.template" - const clusterRestoreSampleFile = fixturesDir + "/backup/azure_blob/cluster-from-restore.yaml.template" - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azure-blob.yaml" - backupFile := fixturesDir + "/backup/azure_blob/backup-azure-blob.yaml" - var namespace, clusterName string - - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only run on AKS clusters") - } - const namespacePrefix = "cluster-backup-azure-blob" - var err error - clusterName, err = env.GetResourceNameFromYAML(azureBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test. 
- // The credentials are retrieved from the environment variables, as we can't create - // a fixture for them - By("creating the Azure Blob Storage credentials", func() { - AssertStorageCredentialsAreCreated( - namespace, - "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey, - ) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, azureBlobSampleFile, env) - }) - - // We backup and restore a cluster, and verify some expected data to - // be there - It("backs up and restore a cluster", func() { - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - By("uploading a backup", func() { - // We create a backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restore backup in a new cluster - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName, psqlClientPod) - - By("deleting the restored cluster", func() { - err := DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available - It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) - - // Only one data.tar files should be present - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, - clusterName, "data.tar") - }, 30).Should(BeNumerically("==", 2)) - }) - - It("backs up and restore a cluster with PITR", func() { - restoredClusterName := "restore-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - backupFile, - env.AzureConfiguration, - 2, - currentTimestamp, - psqlClientPod) - - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR(namespace, restoredClusterName, - backupFile, *currentTimestamp, env) - Expect(err).ToNot(HaveOccurred()) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002", psqlClientPod) - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster, create a scheduled backup, patch it to suspend its - // execution. We verify that the number of backups does not increase. 
- // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azure-blob.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 480) - - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, - clusterName, "data.tar") - }, 60).Should(BeNumerically(">=", 2)) - }) - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - }) - - Context("using Azurite blobs as object storage", Ordered, func() { - // This is a set of tests using an Azurite server deployed in the same - // namespace as the cluster. Since each cluster is installed in its - // own namespace, they can share the configuration file - const ( - clusterRestoreSampleFile = fixturesDir + "/backup/azurite/cluster-from-restore.yaml.template" - scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azurite.yaml" - scheduledBackupImmediateSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azurite.yaml" - backupFile = fixturesDir + "/backup/azurite/backup.yaml" - azuriteCaSecName = "azurite-ca-secret" - azuriteTLSSecName = "azurite-tls-secret" - ) - var namespace, clusterName string - - BeforeAll(func() { - if !(IsLocal() || IsGKE() || IsOpenshift()) { - Skip("This test is only executed on gke, openshift and local") - } - const namespacePrefix = "cluster-backup-azurite" - var err error - clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // Create and assert ca and tls certificate secrets on Azurite - By("creating ca and tls certificate secrets", func() { - err := testUtils.CreateCertificateSecretsOnAzurite(namespace, clusterName, - azuriteCaSecName, azuriteTLSSecName, env) - Expect(err).ToNot(HaveOccurred()) - }) - // Setup Azurite and az cli along with Postgresql cluster - prepareClusterBackupOnAzurite(namespace, clusterName, azuriteBlobSampleFile, backupFile, tableName, psqlClientPod) - }) - - It("restores a backed up cluster", func() { - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreSampleFile, tableName, psqlClientPod) - }) - - // Create a scheduled backup with the 'immediate' option enabled. 
- // We expect the backup to be available - It("immediately starts a backup using ScheduledBackups immediate option", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupImmediateSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupImmediateSampleFile, scheduledBackupName) - - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 30).Should(BeNumerically("==", 2)) - }) - - It("backs up and restore a cluster with PITR Azurite", func() { - const ( - restoredClusterName = "restore-cluster-pitr-azurite" - backupFilePITR = fixturesDir + "/backup/azurite/backup-pitr.yaml" - ) - - prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp, psqlClientPod) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( - namespace, - restoredClusterName, - backupFilePITR, - *currentTimestamp, - env, - ) - Expect(err).NotTo(HaveOccurred()) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002", psqlClientPod) - - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster, create a scheduled backup, patch it to suspend its - // execution. We verify that the number of backups does not increase. - // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 60).Should(BeNumerically(">=", 3)) - }) - - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - }) -}) - -var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() { - const ( - fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" - azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" - externalClusterFileMinio = fixturesBackupDir + "external-clusters-minio-03.yaml.template" - externalClusterFileMinioReplica = fixturesBackupDir + "external-clusters-minio-replica-04.yaml.template" - sourceTakeFirstBackupFileMinio = fixturesBackupDir + "backup-minio-02.yaml" - sourceTakeSecondBackupFileMinio = fixturesBackupDir + "backup-minio-03.yaml" - sourceTakeThirdBackupFileMinio = fixturesBackupDir + "backup-minio-04.yaml" - clusterSourceFileMinio = fixturesBackupDir + "source-cluster-minio-01.yaml.template" - sourceBackupFileAzure = fixturesBackupDir + "backup-azure-blob-02.yaml" - clusterSourceFileAzure = fixturesBackupDir + "source-cluster-azure-blob-01.yaml.template" - externalClusterFileAzure = fixturesBackupDir + "external-clusters-azure-blob-03.yaml.template" - sourceBackupFileAzurePITR = fixturesBackupDir + "backup-azure-blob-pitr.yaml" - externalClusterFileAzurite = fixturesBackupDir + "external-clusters-azurite-03.yaml.template" - backupFileAzurite = fixturesBackupDir + 
"backup-azurite-02.yaml" - tableName = "to_restore" - clusterSourceFileAzureSAS = fixturesBackupDir + "cluster-with-backup-azure-blob-sas.yaml.template" - clusterRestoreFileAzureSAS = fixturesBackupDir + "cluster-from-restore-sas.yaml.template" - sourceBackupFileAzureSAS = fixturesBackupDir + "backup-azure-blob-sas.yaml" - sourceBackupFileAzurePITRSAS = fixturesBackupDir + "backup-azure-blob-pitr-sas.yaml" - level = tests.High - minioCaSecName = "minio-server-ca-secret" - minioTLSSecName = "minio-server-tls-secret" - azuriteCaSecName = "azurite-ca-secret" - azuriteTLSSecName = "azurite-tls-secret" - ) - - currentTimestamp := new(string) - - BeforeEach(func() { - if testLevelEnv.Depth < int(level) { - Skip("Test depth is lower than the amount requested for this test") - } - }) - - // Restore cluster using a recovery object store, that is a backup of another cluster, - // created by Barman Cloud, and defined via the barmanObjectStore option in the externalClusters section - Context("using minio as object storage", Ordered, func() { - var namespace, clusterName string - - BeforeAll(func() { - if !IsLocal() { - Skip("This test is only executed on local") - } - const namespacePrefix = "recovery-barman-object-minio" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileMinio) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") - - By("create the certificates for MinIO", func() { - err := minioEnv.CreateCaSecret(env, namespace) - Expect(err).ToNot(HaveOccurred()) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileMinio, env) - - By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) - if err != nil { - return false, err - } - return connectionStatus, nil - }, 60).Should(BeTrue()) - }) - }) - - It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName, err := env.GetResourceNameFromYAML(externalClusterFileMinio) - Expect(err).ToNot(HaveOccurred()) - - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - // There should be a backup resource and - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeFirstBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - - // TODO: this is to force a CHECKPOINT when we run the backup on standby. 
- // This should be better handled inside ExecuteBackup - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1), - fmt.Sprintf("verify the number of backup %v is equals to 1", latestTar)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterRestore(namespace, externalClusterFileMinio, tableName, psqlClientPod) - - // verify test data on restored external cluster - AssertDataExpectedCount(namespace, externalClusterName, tableName, 2, psqlClientPod) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, externalClusterFileMinio) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ - " option in 'externalClusters' section", func() { - externalClusterRestoreName := "restore-external-cluster-pitr" - // We have already written 2 rows in test table 'to_restore' in above test now we will take current - // timestamp. It will use to restore cluster from source using PITR - - By("getting currentTimestamp", func() { - ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env, psqlClientPod) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() { - // insert 2 more rows entries 3,4 on the "app" database - insertRecordIntoTable(namespace, clusterName, tableName, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, tableName, 4, psqlClientPod) - }) - By("creating second backup and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeSecondBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(2), - fmt.Sprintf("verify the number of backup %v is equals to 2", latestTar)) - }) - var restoredCluster *apiv1.Cluster - By("create a cluster from backup with PITR", func() { - var err error - restoredCluster, err = testUtils.CreateClusterFromExternalClusterBackupWithPITROnMinio( - namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) - Expect(err).NotTo(HaveOccurred()) - }) - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterRestoreName, - tableName, - "00000002", - psqlClientPod) - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - - It("restore cluster from barman object using replica option in spec", func() { - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, "for_restore_repl", psqlClientPod) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeThirdBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - 
AssertBackupConditionInClusterStatus(namespace, clusterName) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(3), - fmt.Sprintf("verify the number of backup %v is great than 3", latestTar)) - }) - - // Replicating a cluster with asynchronous replication - AssertClusterAsyncReplica( - namespace, - clusterSourceFileMinio, - externalClusterFileMinioReplica, - "for_restore_repl", - psqlClientPod) - }) - }) - - Context("using azure blobs as object storage", func() { - Context("storage account access authentication", Ordered, func() { - var namespace, clusterName string - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only executed on AKS clusters") - } - const namespacePrefix = "recovery-barman-object-azure" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzure) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test. - // The credentials are retrieved from the environment variables, as we can't create - // a fixture for them - By("creating the Azure Blob Storage credentials", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", - env.AzureConfiguration.StorageAccount, env.AzureConfiguration.StorageKey) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileAzure, env) - }) - - It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - By("backing up a cluster and verifying it exists on azure blob storage", func() { - // Create the backup - testUtils.ExecuteBackup(namespace, sourceBackupFileAzure, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - }) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterRestore(namespace, externalClusterFileAzure, tableName, psqlClientPod) - }) - - It("restores a cluster with 'PITR' from barman object using "+ - "'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName := "external-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - sourceBackupFileAzurePITR, - env.AzureConfiguration, - 1, - currentTimestamp, - psqlClientPod) - - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - clusterName, - *currentTimestamp, - "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.BlobContainer, - env) - Expect(err).ToNot(HaveOccurred()) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - 
AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, externalClusterName, - tableName, "00000002", psqlClientPod) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) - - Context("storage account SAS Token authentication", Ordered, func() { - var namespace, clusterName string - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only executed on AKS clusters") - } - const namespacePrefix = "cluster-backup-azure-blob-sas" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzureSAS) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test, - // we get the credentials from the environment variables as we can't create - // a fixture for them - By("creating the Azure Blob Container SAS Token credentials", func() { - AssertCreateSASTokenCredentials(namespace, env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey) - }) - - // Create the Cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileAzureSAS, env) - }) - - It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) - - // Create a WAL on the primary and check if it arrives in the - // Azure Blob Storage within a short time - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - By("backing up a cluster and verifying it exists on azure blob storage", func() { - // We create a Backup - testUtils.ExecuteBackup(namespace, sourceBackupFileAzureSAS, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - }) - - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName, psqlClientPod) - }) - - It("restores a cluster with 'PITR' from barman object using "+ - "'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName := "external-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - sourceBackupFileAzurePITRSAS, - env.AzureConfiguration, - 1, - currentTimestamp, - psqlClientPod) - - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - clusterName, - *currentTimestamp, - "backup-storage-creds-sas", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.BlobContainer, - env) - Expect(err).ToNot(HaveOccurred()) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, externalClusterName, - tableName, "00000002", psqlClientPod) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) - }) - - Context("using Azurite blobs as object storage", Ordered, func() { - var 
namespace, clusterName string - BeforeAll(func() { - if IsAKS() { - Skip("This test is not run on AKS") - } - const namespacePrefix = "recovery-barman-object-azurite" - var err error - clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // Create and assert ca and tls certificate secrets on Azurite - By("creating ca and tls certificate secrets", func() { - err := testUtils.CreateCertificateSecretsOnAzurite( - namespace, - clusterName, - azuriteCaSecName, - azuriteTLSSecName, - env) - Expect(err).ToNot(HaveOccurred()) - }) - // Setup Azurite and az cli along with PostgreSQL cluster - prepareClusterBackupOnAzurite( - namespace, - clusterName, - azuriteBlobSampleFile, - backupFileAzurite, - tableName, - psqlClientPod) - }) - - It("restore cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, externalClusterFileAzurite, tableName, psqlClientPod) - }) - - It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ - " option in 'externalClusters' section", func() { - const ( - externalClusterRestoreName = "restore-external-cluster-pitr" - backupFileAzuritePITR = fixturesBackupDir + "backup-azurite-pitr.yaml" - ) - - prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp, psqlClientPod) - - // Create a cluster from a particular time using external backup. - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzurite( - namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) - Expect(err).NotTo(HaveOccurred()) - - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterRestoreName, - tableName, - "00000002", - psqlClientPod) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) -}) diff --git a/tests/e2e/certificates_test.go b/tests/e2e/certificates_test.go index 5c419acb8a..6c553623f3 100644 --- a/tests/e2e/certificates_test.go +++ b/tests/e2e/certificates_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,17 +13,28 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( + "context" "fmt" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" + "k8s.io/utils/ptr" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -41,6 +53,95 @@ import ( // from an application, by using certificates that have been created by 'kubectl-cnpg' // Then we verify that the server certificate and the operator are able to handle the provided server certificates var _ = Describe("Certificates", func() { + createClientCertificatesViaKubectlPluginFunc := func( + ctx context.Context, + crudClient ctrlclient.Client, + cluster apiv1.Cluster, + certName string, + userName string, + ) error { + // clientCertName := "cluster-cert" + // user := "app" + // Create the certificate + _, _, err := run.Run(fmt.Sprintf( + "kubectl cnpg certificate %v --cnpg-cluster %v --cnpg-user %v -n %v", + certName, + cluster.Name, + userName, + cluster.Namespace)) + if err != nil { + return err + } + // Verifying client certificate secret existence + secret := &corev1.Secret{} + err = crudClient.Get(ctx, ctrlclient.ObjectKey{Namespace: cluster.Namespace, Name: certName}, secret) + return err + } + + defaultPodFunc := func(namespace string, name string, rootCASecretName string, tlsSecretName string) corev1.Pod { + var secretMode int32 = 0o600 + seccompProfile := &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + } + + return corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "secret-volume-root-ca", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: rootCASecretName, + DefaultMode: &secretMode, + }, + }, + }, + { + Name: "secret-volume-tls", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: tlsSecretName, + DefaultMode: &secretMode, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: name, + Image: "ghcr.io/cloudnative-pg/webtest:1.7.0", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8080, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "secret-volume-root-ca", + MountPath: "/etc/secrets/ca", + }, + { + Name: "secret-volume-tls", + MountPath: "/etc/secrets/tls", + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + SeccompProfile: seccompProfile, + }, + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: seccompProfile, + }, + }, + } + } + const ( serverCASecretName = "my-postgresql-server-ca" // #nosec serverCertSecretName = "my-postgresql-server" // #nosec @@ -67,7 +168,7 @@ var _ = Describe("Certificates", func() { cleanClusterCertification := func() { err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) 
cluster.Spec.Certificates.ServerTLSSecret = "" cluster.Spec.Certificates.ServerCASecret = "" @@ -82,20 +183,21 @@ var _ = Describe("Certificates", func() { var err error // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "postgresql-cert" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) // Create the client certificate - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - err = utils.CreateClientCertificatesViaKubectlPlugin( + err = createClientCertificatesViaKubectlPluginFunc( + env.Ctx, + env.Client, *cluster, kubectlCNPGClientCertSecretName, "app", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -106,96 +208,99 @@ var _ = Describe("Certificates", func() { It("can authenticate using a Certificate that is generated from the 'kubectl-cnpg' plugin", Label(tests.LabelPlugin), func() { - pod := utils.DefaultWebapp(namespace, "app-pod-cert-1", + pod := defaultPodFunc(namespace, "app-pod-cert-1", defaultCASecretName, kubectlCNPGClientCertSecretName) - err := utils.PodCreateAndWaitForReady(env, &pod, 240) + err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) - It("can authenticate after switching to user-supplied server certs", Label(tests.LabelServiceConnectivity), func() { - CreateAndAssertServerCertificatesSecrets( - namespace, - clusterName, - serverCASecretName, - serverCertSecretName, - false, - ) + It("can authenticate after switching to user-supplied server certs", Label(tests.LabelServiceConnectivity), + func() { + CreateAndAssertServerCertificatesSecrets( + namespace, + clusterName, + serverCASecretName, + serverCertSecretName, + false, + ) - var err error - // Updating defaults certificates entries with user provided certificates, - // i.e server CA and TLS secrets inside the cluster - Eventually(func() error { - _, _, err = utils.RunUnchecked(fmt.Sprintf( - "kubectl patch cluster %v -n %v -p "+ - "'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+ - "\"serverTLSSecret\":\"%v\"}}}'"+ - " --type='merge'", clusterName, namespace, serverCASecretName, serverCertSecretName)) - if err != nil { - return err - } - return nil - }, 60, 5).Should(BeNil()) - - Eventually(func() (bool, error) { - certUpdateStatus := false - cluster, err := env.GetCluster(namespace, clusterName) - if cluster.Status.Certificates.ServerCASecret == serverCASecretName { - if cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName { - certUpdateStatus = true + var err error + // Updating defaults certificates entries with user provided certificates, + // i.e server CA and TLS secrets inside the cluster + Eventually(func() error { + _, _, err = run.Unchecked(fmt.Sprintf( + "kubectl patch cluster %v -n %v -p "+ + "'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+ + "\"serverTLSSecret\":\"%v\"}}}'"+ + " --type='merge'", clusterName, namespace, serverCASecretName, serverCertSecretName)) + if err != nil { + return err } - } - return certUpdateStatus, err - }, 
120).Should(BeTrue(), fmt.Sprintf("Error: %v", err)) + return nil + }, 60, 5).Should(Succeed()) - pod := utils.DefaultWebapp( - namespace, - "app-pod-cert-2", - serverCASecretName, - kubectlCNPGClientCertSecretName, - ) - err = utils.PodCreateAndWaitForReady(env, &pod, 240) - Expect(err).ToNot(HaveOccurred()) - AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) - }) + Eventually(func() (bool, error) { + certUpdateStatus := false + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + if cluster.Status.Certificates.ServerCASecret == serverCASecretName { + if cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName { + certUpdateStatus = true + } + } + return certUpdateStatus, err + }, 120).Should(BeTrue(), fmt.Sprintf("Error: %v", err)) - It("can connect after switching to user-supplied client certificates", Label(tests.LabelServiceConnectivity), func() { - // Create certificates secret for client - CreateAndAssertClientCertificatesSecrets(namespace, clusterName, clientCASecretName, replicaCertSecretName, - clientCertSecretName, false) - - // Updating defaults certificates entries with user provided certificates, - // i.e client CA and TLS secrets inside the cluster - Eventually(func() error { - _, _, err := utils.RunUnchecked(fmt.Sprintf( - "kubectl patch cluster %v -n %v -p "+ - "'{\"spec\":{\"certificates\":{\"clientCASecret\":\"%v\","+ - "\"replicationTLSSecret\":\"%v\"}}}'"+ - " --type='merge'", clusterName, namespace, clientCASecretName, replicaCertSecretName)) - if err != nil { - return err - } - return nil - }, 60, 5).Should(BeNil()) - - Eventually(func() (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) - return cluster.Spec.Certificates.ClientCASecret == clientCASecretName && - cluster.Status.Certificates.ReplicationTLSSecret == replicaCertSecretName, err - }, 120, 5).Should(BeTrue()) - - pod := utils.DefaultWebapp(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName) - err := utils.PodCreateAndWaitForReady(env, &pod, 240) - Expect(err).ToNot(HaveOccurred()) - AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) - }) + pod := defaultPodFunc( + namespace, + "app-pod-cert-2", + serverCASecretName, + kubectlCNPGClientCertSecretName, + ) + err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) + Expect(err).ToNot(HaveOccurred()) + AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) + }) + + It("can connect after switching to user-supplied client certificates", Label(tests.LabelServiceConnectivity), + func() { + // Create certificates secret for client + CreateAndAssertClientCertificatesSecrets(namespace, clusterName, clientCASecretName, + replicaCertSecretName, + clientCertSecretName, false) + + // Updating defaults certificates entries with user provided certificates, + // i.e client CA and TLS secrets inside the cluster + Eventually(func() error { + _, _, err := run.Unchecked(fmt.Sprintf( + "kubectl patch cluster %v -n %v -p "+ + "'{\"spec\":{\"certificates\":{\"clientCASecret\":\"%v\","+ + "\"replicationTLSSecret\":\"%v\"}}}'"+ + " --type='merge'", clusterName, namespace, clientCASecretName, replicaCertSecretName)) + if err != nil { + return err + } + return nil + }, 60, 5).Should(Succeed()) + + Eventually(func() (bool, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + return cluster.Spec.Certificates.ClientCASecret == clientCASecretName && + cluster.Status.Certificates.ReplicationTLSSecret == 
replicaCertSecretName, err + }, 120, 5).Should(BeTrue()) + + pod := defaultPodFunc(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName) + err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) + Expect(err).ToNot(HaveOccurred()) + AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) + }) It("can connect after switching both server and client certificates to user-supplied mode", Label(tests.LabelServiceConnectivity), func() { // Updating defaults certificates entries with user provided certificates, // i.e server and client CA and TLS secrets inside the cluster Eventually(func() error { - _, _, err := utils.RunUnchecked(fmt.Sprintf( + _, _, err := run.Unchecked(fmt.Sprintf( "kubectl patch cluster %v -n %v -p "+ "'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+ "\"serverTLSSecret\":\"%v\",\"clientCASecret\":\"%v\","+ @@ -212,18 +317,18 @@ var _ = Describe("Certificates", func() { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) Eventually(func() (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.Certificates.ServerCASecret == serverCASecretName && cluster.Status.Certificates.ClientCASecret == clientCASecretName && cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName && cluster.Status.Certificates.ReplicationTLSSecret == replicaCertSecretName, err }, 120, 5).Should(BeTrue()) - pod := utils.DefaultWebapp(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName) - err := utils.PodCreateAndWaitForReady(env, &pod, 240) + pod := defaultPodFunc(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName) + err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) @@ -242,7 +347,7 @@ var _ = Describe("Certificates", func() { var err error // Create a cluster in a namespace that will be deleted after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) CreateAndAssertServerCertificatesSecrets( namespace, @@ -252,23 +357,24 @@ var _ = Describe("Certificates", func() { false, ) AssertCreateCluster(namespace, clusterName, sampleFile, env) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - err = utils.CreateClientCertificatesViaKubectlPlugin( + err = createClientCertificatesViaKubectlPluginFunc( + env.Ctx, + env.Client, *cluster, kubectlCNPGClientCertSecretName, "app", - env, ) Expect(err).ToNot(HaveOccurred()) - pod := utils.DefaultWebapp( + pod := defaultPodFunc( namespace, "app-pod-cert-2", serverCASecretName, kubectlCNPGClientCertSecretName, ) - err = utils.PodCreateAndWaitForReady(env, &pod, 240) + err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) @@ -287,7 +393,7 @@ var _ = Describe("Certificates", func() { var err error // Create a cluster in a namespace that will be deleted after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) 
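[Editor's aside: the certificate-switch tests above apply the spec change by shelling out to `kubectl patch --type=merge` inside an `Eventually`. A minimal in-process equivalent, sketched against the controller-runtime client these tests already hold in `env.Client`, could look like the following. The helper name and the nil-guard are illustrative and not part of this patch; `apiv1` and `client` are the API and controller-runtime imports used elsewhere in these files.]

```go
// patchClusterCertificates is a hypothetical helper sketching the same
// merge patch performed by the kubectl commands in the tests above.
func patchClusterCertificates(
	ctx context.Context,
	c client.Client,
	namespace, clusterName, serverCASecret, serverTLSSecret string,
) error {
	var cluster apiv1.Cluster
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, &cluster); err != nil {
		return err
	}
	// Capture the original object, then mutate and send a merge patch,
	// mirroring `kubectl patch --type=merge` on the same fields.
	orig := cluster.DeepCopy()
	if cluster.Spec.Certificates == nil { // defensive guard, assumption
		cluster.Spec.Certificates = &apiv1.CertificatesConfiguration{}
	}
	cluster.Spec.Certificates.ServerCASecret = serverCASecret
	cluster.Spec.Certificates.ServerTLSSecret = serverTLSSecret
	return c.Patch(ctx, &cluster, client.MergeFrom(orig))
}
```

Shelling out keeps the test close to what a user would type, at the cost of hand-building the JSON patch string; the client-based form is type-checked but bypasses the kubectl code path.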
Expect(err).ToNot(HaveOccurred()) // Create certificates secret for client @@ -300,8 +406,8 @@ var _ = Describe("Certificates", func() { false, ) AssertCreateCluster(namespace, clusterName, sampleFile, env) - pod := utils.DefaultWebapp(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName) - err = utils.PodCreateAndWaitForReady(env, &pod, 240) + pod := defaultPodFunc(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName) + err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) @@ -320,7 +426,7 @@ var _ = Describe("Certificates", func() { // Create a cluster in a namespace that will be deleted after the test var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create certificates secret for server @@ -341,8 +447,8 @@ var _ = Describe("Certificates", func() { false, ) AssertCreateCluster(namespace, clusterName, sampleFile, env) - pod := utils.DefaultWebapp(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName) - err = utils.PodCreateAndWaitForReady(env, &pod, 240) + pod := defaultPodFunc(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName) + err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) diff --git a/tests/e2e/cluster_major_upgrade_test.go b/tests/e2e/cluster_major_upgrade_test.go new file mode 100644 index 0000000000..178dd7e063 --- /dev/null +++ b/tests/e2e/cluster_major_upgrade_test.go @@ -0,0 +1,433 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade), func() { + const ( + level = tests.Medium + namespacePrefix = "cluster-major-upgrade" + postgisEntry = "postgis" + postgresqlEntry = "postgresql" + postgresqlMinimalEntry = "postgresql-minimal" + customImageRegistryEnvVar = "MAJOR_UPGRADE_IMAGE_REGISTRY" + ) + + var namespace string + + type scenario struct { + startingCluster *v1.Cluster + startingMajor int + targetImage string + targetMajor int + } + scenarios := map[string]*scenario{} + + generateBaseCluster := func(namespace string, storageClass string) *v1.Cluster { + return &v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg-major-upgrade", + Namespace: namespace, + }, + Spec: v1.ClusterSpec{ + Instances: 3, + Bootstrap: &v1.BootstrapConfiguration{ + InitDB: &v1.BootstrapInitDB{ + DataChecksums: ptr.To(true), + WalSegmentSize: 32, + }, + }, + StorageConfiguration: v1.StorageConfiguration{ + StorageClass: &storageClass, + Size: "1Gi", + }, + WalStorage: &v1.StorageConfiguration{ + StorageClass: &storageClass, + Size: "1Gi", + }, + PostgresConfiguration: v1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1000", + "log_replication_commands": "on", + }, + }, + }, + } + } + + generatePostgreSQLCluster := func(namespace string, storageClass string, tagVersion string) *v1.Cluster { + cluster := generateBaseCluster(namespace, storageClass) + cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-standard-bookworm", tagVersion) + cluster.Spec.Bootstrap.InitDB.PostInitSQL = []string{ + "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;", + "CREATE EXTENSION IF NOT EXISTS pg_trgm;", + } + cluster.Spec.PostgresConfiguration.Parameters["pg_stat_statements.track"] = "top" + return cluster + } + generatePostgreSQLMinimalCluster := func(namespace string, storageClass string, tagVersion string) *v1.Cluster { + cluster := generatePostgreSQLCluster(namespace, storageClass, tagVersion) + cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-minimal-bookworm", tagVersion) + return 
cluster
+	}
+
+	generatePostGISCluster := func(namespace string, storageClass string, tagVersion string) *v1.Cluster {
+		cluster := generateBaseCluster(namespace, storageClass)
+		cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgis:%s", tagVersion)
+		cluster.Spec.Bootstrap.InitDB.PostInitApplicationSQL = []string{
+			"CREATE EXTENSION postgis",
+			"CREATE EXTENSION postgis_raster",
+			"CREATE EXTENSION postgis_sfcgal",
+			"CREATE EXTENSION fuzzystrmatch",
+			"CREATE EXTENSION address_standardizer",
+			"CREATE EXTENSION address_standardizer_data_us",
+			"CREATE EXTENSION postgis_tiger_geocoder",
+			"CREATE EXTENSION postgis_topology",
+			"CREATE TABLE geometries (name varchar, geom geometry)",
+			"INSERT INTO geometries VALUES" +
+				" ('Point', 'POINT(0 0)')," +
+				" ('Linestring', 'LINESTRING(0 0, 1 1, 2 1, 2 2)')," +
+				" ('Polygon', 'POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))')," +
+				" ('PolygonWithHole', 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1))')," +
+				" ('Collection', 'GEOMETRYCOLLECTION(POINT(2 0),POLYGON((0 0, 1 0, 1 1, 0 1, 0 0)))');",
+		}
+		return cluster
+	}
+
+	type versionInfo struct {
+		currentMajor uint64
+		currentTag   string
+		targetMajor  uint64
+		targetTag    string
+	}
+
+	determineVersionsForTesting := func() versionInfo {
+		currentImage := os.Getenv("POSTGRES_IMG")
+		Expect(currentImage).ToNot(BeEmpty())
+
+		currentVersion, err := version.FromTag(reference.New(currentImage).Tag)
+		Expect(err).NotTo(HaveOccurred())
+		currentMajor := currentVersion.Major()
+		currentTag := strconv.FormatUint(currentMajor, 10)
+
+		targetVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag)
+		Expect(err).ToNot(HaveOccurred())
+		targetMajor := targetVersion.Major()
+		targetTag := strconv.FormatUint(targetMajor, 10)
+
+		// If we are already on the target major version, choose a previous one for testing
+		if currentMajor == targetMajor {
+			currentMajor = targetMajor - (uint64(rand.Int() % 4)) - 1
+			GinkgoWriter.Printf("Using %v as the current major version instead.\n", currentMajor)
+		}
+
+		// This means we are on a beta version, so we can just invert the versions
+		if currentMajor > targetMajor {
+			currentMajor, targetMajor = targetMajor, currentMajor
+			currentTag = targetTag
+			// Beta images don't have a major-version-only tag yet, and
+			// are most likely in a format such as "18beta1" or "18rc2".
+			// So, we split at the first `-` and use that prefix to build the target image.
+			targetTag = strings.Split(reference.New(currentImage).Tag, "-")[0]
+			GinkgoWriter.Printf("Using %v as the current major and upgrading to %v.\n", currentMajor, targetMajor)
+		}
+
+		return versionInfo{
+			currentMajor: currentMajor,
+			currentTag:   currentTag,
+			targetMajor:  targetMajor,
+			targetTag:    targetTag,
+		}
+	}
+
+	// generateTargetImages, given a target tag, generates a target image for each build scenario.
+	// The MAJOR_UPGRADE_IMAGE_REGISTRY env variable allows customizing the target image registry.
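+	// For example, with MAJOR_UPGRADE_IMAGE_REGISTRY=registry.example.com/postgresql (a hypothetical
+	// registry) and target tag "18", the postgresql entry becomes
+	// "registry.example.com/postgresql:18-standard-bookworm".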
+ generateTargetImages := func(targetTag string) map[string]string { + const ( + // ImageRepository is the default repository for Postgres container images + ImageRepository = "ghcr.io/cloudnative-pg/postgresql" + + // PostgisImageRepository is the default repository for Postgis container images + PostgisImageRepository = "ghcr.io/cloudnative-pg/postgis" + ) + + // Default target Images + targetImages := map[string]string{ + postgisEntry: fmt.Sprintf("%v:%v", PostgisImageRepository, targetTag), + postgresqlEntry: fmt.Sprintf("%v:%v-standard-bookworm", ImageRepository, targetTag), + postgresqlMinimalEntry: fmt.Sprintf("%v:%v-minimal-bookworm", ImageRepository, targetTag), + } + // Set custom targets when detecting a given env variable + if envValue := os.Getenv(customImageRegistryEnvVar); envValue != "" { + targetImages[postgisEntry] = fmt.Sprintf("%v:%v-postgis-bookworm", envValue, targetTag) + targetImages[postgresqlEntry] = fmt.Sprintf("%v:%v-standard-bookworm", envValue, targetTag) + targetImages[postgresqlMinimalEntry] = fmt.Sprintf("%v:%v-minimal-bookworm", envValue, targetTag) + } + + return targetImages + } + + buildScenarios := func( + namespace string, storageClass string, info versionInfo, + ) map[string]*scenario { + targetImages := generateTargetImages(info.targetTag) + + return map[string]*scenario{ + postgisEntry: { + startingCluster: generatePostGISCluster(namespace, storageClass, strconv.FormatUint(info.currentMajor, 10)), + startingMajor: int(info.currentMajor), + targetImage: targetImages[postgisEntry], + targetMajor: int(info.targetMajor), + }, + postgresqlEntry: { + startingCluster: generatePostgreSQLCluster(namespace, storageClass, + strconv.FormatUint(info.currentMajor, 10)), + startingMajor: int(info.currentMajor), + targetImage: targetImages[postgresqlEntry], + targetMajor: int(info.targetMajor), + }, + postgresqlMinimalEntry: { + startingCluster: generatePostgreSQLMinimalCluster(namespace, storageClass, + strconv.FormatUint(info.currentMajor, 10)), + startingMajor: int(info.currentMajor), + targetImage: targetImages[postgresqlMinimalEntry], + targetMajor: int(info.targetMajor), + }, + } + } + + verifyPodsChanged := func( + ctx context.Context, client client.Client, cluster *v1.Cluster, oldPodsUUIDs []types.UID, + ) { + Eventually(func(g Gomega) { + podList, err := clusterutils.ListPods(ctx, client, cluster.Name, cluster.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(podList.Items).To(HaveLen(len(oldPodsUUIDs))) + for _, pod := range podList.Items { + g.Expect(oldPodsUUIDs).NotTo(ContainElement(pod.UID)) + } + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + + verifyPVCsChanged := func( + ctx context.Context, client client.Client, cluster *v1.Cluster, oldPVCsUUIDs []types.UID, + ) { + Eventually(func(g Gomega) { + pvcList, err := storage.GetPVCList(ctx, client, cluster.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pvcList.Items).To(HaveLen(len(oldPVCsUUIDs))) + for _, pvc := range pvcList.Items { + if pvc.Labels[utils.ClusterInstanceRoleLabelName] == specs.ClusterRoleLabelReplica { + g.Expect(oldPVCsUUIDs).NotTo(ContainElement(pvc.UID)) + } else { + g.Expect(oldPVCsUUIDs).To(ContainElement(pvc.UID)) + } + } + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + + verifyPostgresVersion := func( + env *environment.TestingEnvironment, primary *corev1.Pod, oldStdOut string, targetMajor int, + ) { + Eventually(func(g Gomega) { + stdOut, stdErr, err := 
exec.EventuallyExecQueryInInstancePod(env.Ctx, env.Client, env.Interface, + env.RestClientConfig, + exec.PodLocator{Namespace: primary.GetNamespace(), PodName: primary.GetName()}, postgres.AppDBName, + "SELECT version();", 60, objects.PollingTime) + g.Expect(err).ToNot(HaveOccurred(), "failed to execute version query") + g.Expect(stdErr).To(BeEmpty(), "unexpected stderr output when checking version") + g.Expect(stdOut).ToNot(Equal(oldStdOut), "postgres version did not change") + g.Expect(stdOut).To(ContainSubstring(strconv.Itoa(targetMajor)), + fmt.Sprintf("version string doesn't contain expected major version %d: %s", targetMajor, stdOut)) + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + + verifyCleanupAfterUpgrade := func(ctx context.Context, client client.Client, primary *corev1.Pod) { + shouldHaveBeenDeleted := []string{ + "/var/lib/postgresql/data/pgdata/pg_upgrade_output.d", + "/var/lib/postgresql/data/pgdata-new", + "/var/lib/postgresql/data/pgwal-new", + } + timeout := time.Second * 20 + for _, path := range shouldHaveBeenDeleted { + _, stdErr, err := exec.CommandInInstancePod(ctx, client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: primary.GetNamespace(), PodName: primary.GetName()}, &timeout, + "stat", path) + Expect(err).To(HaveOccurred(), "path: %s", path) + Expect(stdErr).To(ContainSubstring("No such file or directory"), "path: %s", path) + } + } + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + + versionInfo := determineVersionsForTesting() + var err error + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + storageClass := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + Expect(storageClass).ToNot(BeEmpty()) + + // We cannot use generated entries in the DescribeTable, so we use the scenario key as a constant, but + // define the actual content here. + // See https://onsi.github.io/ginkgo/#mental-model-table-specs-are-just-syntactic-sugar + scenarios = buildScenarios(namespace, storageClass, versionInfo) + }) + + DescribeTable("can upgrade a Cluster to a newer major version", func(scenarioName string) { + By("Creating the starting cluster") + // Avoid running Postgis major upgrade tests when a custom registry is being specified, because our + // PostGIS images are still based on Debian bullseye which uses OpenSSL 1.1, thus making them incompatible + // with any other image that uses OpenSSL 3.0 or greater. 
+ // TODO: remove once we have PostGIS bookworm images + if scenarioName == postgisEntry && os.Getenv(customImageRegistryEnvVar) != "" { + Skip("Skipping PostGIS major upgrades when a custom registry is specified") + } + + scenario := scenarios[scenarioName] + + if scenarioName == postgisEntry && scenario.targetMajor > 17 { + // PostGIS images are not available for Postgres versions greater than 17 + Skip("Skipping major upgrades on PostGIS images for Postgres versions greater than 17") + } + + cluster := scenario.startingCluster + err := env.Client.Create(env.Ctx, cluster) + Expect(err).NotTo(HaveOccurred()) + AssertClusterIsReady(cluster.Namespace, cluster.Name, testTimeouts[timeouts.ClusterIsReady], + env) + + By("Collecting the pods UUIDs") + podList, err := clusterutils.ListPods(env.Ctx, env.Client, cluster.Name, cluster.Namespace) + Expect(err).ToNot(HaveOccurred()) + oldPodsUUIDs := make([]types.UID, len(podList.Items)) + for i, pod := range podList.Items { + oldPodsUUIDs[i] = pod.UID + } + + By("Collecting the PVCs UUIDs") + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, cluster.Namespace) + Expect(err).ToNot(HaveOccurred()) + oldPVCsUUIDs := make([]types.UID, len(pvcList.Items)) + for i, pvc := range pvcList.Items { + oldPVCsUUIDs[i] = pvc.UID + } + + By("Checking the starting version of the cluster") + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, cluster.Namespace, cluster.Name) + Expect(err).ToNot(HaveOccurred()) + + oldStdOut, stdErr, err := exec.EventuallyExecQueryInInstancePod(env.Ctx, env.Client, env.Interface, + env.RestClientConfig, + exec.PodLocator{Namespace: primary.GetNamespace(), PodName: primary.GetName()}, postgres.AppDBName, + "SELECT version();", 60, objects.PollingTime) + Expect(err).ToNot(HaveOccurred()) + Expect(stdErr).To(BeEmpty()) + Expect(oldStdOut).To(ContainSubstring(strconv.Itoa(scenario.startingMajor))) + + By("Updating the major") + Eventually(func() error { + cluster, err = clusterutils.Get(env.Ctx, env.Client, cluster.Namespace, cluster.Name) + if err != nil { + return err + } + cluster.Spec.ImageName = scenario.targetImage + return env.Client.Update(env.Ctx, cluster) + }).WithTimeout(1*time.Minute).WithPolling(10*time.Second).Should( + Succeed(), + "Failed to update cluster image from %s to %s", + cluster.Spec.ImageName, + scenario.targetImage, + ) + + By("Waiting for the cluster to be in the major upgrade phase") + Eventually(func(g Gomega) { + cluster, err = clusterutils.Get(env.Ctx, env.Client, cluster.Namespace, cluster.Name) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.Phase).To(Equal(v1.PhaseMajorUpgrade)) + }).WithTimeout(1 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + AssertClusterIsReady(cluster.Namespace, cluster.Name, testTimeouts[timeouts.ClusterIsReady], env) + + // The upgrade destroys all the original pods and creates new ones. We want to make sure that we have + // the same amount of pods as before, but with different UUIDs. + By("Verifying the pods UUIDs have changed") + verifyPodsChanged(env.Ctx, env.Client, cluster, oldPodsUUIDs) + + // The upgrade destroys all the original PVCs and creates new ones, except for the ones associated to the + // primary. We want to make sure that we have the same amount of PVCs as before, but with different UUIDs, + // which should be the same instead for the primary PVCs. 
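+		// (This reflects how the offline in-place major upgrade works: pg_upgrade runs against the
+		// primary's existing volumes, while the replicas are re-created from the upgraded primary
+		// on fresh PVCs.)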
+ By("Verifying the replicas' PVCs have changed") + verifyPVCsChanged(env.Ctx, env.Client, cluster, oldPVCsUUIDs) + + // Check that the version has been updated + By("Verifying the cluster is running the target version") + verifyPostgresVersion(env, primary, oldStdOut, scenario.targetMajor) + + // Expect temporary files to be deleted + By("Checking no leftovers exist from the upgrade") + verifyCleanupAfterUpgrade(env.Ctx, env.Client, primary) + }, + Entry("PostGIS", postgisEntry), + Entry("PostgreSQL", postgresqlEntry), + Entry("PostgreSQL minimal", postgresqlMinimalEntry), + ) +}) diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index 571db88a81..dbea8c78f4 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -20,18 +23,18 @@ import ( "fmt" "os" "strings" - "time" batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/types" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -62,40 +65,59 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin It("can import a database with large objects", func() { var err error const namespacePrefix = "microservice-large-object" - sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile) Expect(err).ToNot(HaveOccurred()) oid := 16393 data := "large object test" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env) - AssertCreateTestData(namespace, sourceClusterName, tableName, psqlClientPod) - AssertCreateTestDataLargeObject(namespace, sourceClusterName, oid, data, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: sourceClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + AssertCreateTestDataLargeObject(namespace, sourceClusterName, oid, data) importedClusterName = "cluster-pgdump-large-object" cluster := AssertClusterImport(namespace, importedClusterName, sourceClusterName, "app") - AssertDataExpectedCount(namespace, importedClusterName, tableName, 2, psqlClientPod) - AssertLargeObjectValue(namespace, importedClusterName, oid, data, psqlClientPod) + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: importedClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) + AssertLargeObjectValue(namespace, importedClusterName, oid, data) By("deleting the imported database", func() { - Expect(testsUtils.DeleteObject(env, cluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed()) }) }) It("can import a database", func() { var err error const namespacePrefix = "microservice" - sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env) assertCreateTableWithDataOnSourceCluster(namespace, tableName, sourceClusterName) importedClusterName = "cluster-pgdump" AssertClusterImport(namespace, importedClusterName, sourceClusterName, "app") - AssertDataExpectedCount(namespace, importedClusterName, tableName, 2, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: importedClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) assertTableAndDataOnImportedCluster(namespace, tableName, importedClusterName) }) @@ -104,7 +126,7 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin const namespacePrefix = "microservice-different-db" importedClusterName = "cluster-pgdump-different-db" // create namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) assertImportRenamesSelectedDatabase(namespace, sourceSampleFile, 
importedClusterName, tableName, "") @@ -115,9 +137,9 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin // nonexistent database in cluster definition while importing var err error const namespacePrefix = "cnpg-microservice-error" - sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env) @@ -149,19 +171,19 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin Expect(postgresImage).ShouldNot(BeEmpty(), "POSTGRES_IMG env should not be empty") // this test case is only applicable if we are not already on the latest major - if shouldSkip(postgresImage) { + if postgres.IsLatestMajor(postgresImage) { Skip("Already running on the latest major. This test is not applicable for PostgreSQL " + postgresImage) } // Gather the target image - targetImage, err := testsUtils.BumpPostgresImageMajorVersion(postgresImage) + targetImage, err := postgres.BumpPostgresImageMajorVersion(postgresImage) Expect(err).ToNot(HaveOccurred()) Expect(targetImage).ShouldNot(BeEmpty(), "targetImage could not be empty") By(fmt.Sprintf("import cluster with different major, target version is %s", targetImage), func() { var err error // create namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) assertImportRenamesSelectedDatabase(namespace, sourceSampleFile, importedClusterName, tableName, targetImage) @@ -169,20 +191,6 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin }) }) -// shouldSkip skip this test if the current POSTGRES_IMG is already the latest major -func shouldSkip(postgresImage string) bool { - // Get the current tag - currentImageReference := utils.NewReference(postgresImage) - currentImageVersion, err := postgres.GetPostgresVersionFromTag(currentImageReference.Tag) - Expect(err).ToNot(HaveOccurred()) - // Get the default tag - defaultImageReference := utils.NewReference(versions.DefaultImageName) - defaultImageVersion, err := postgres.GetPostgresVersionFromTag(defaultImageReference.Tag) - Expect(err).ToNot(HaveOccurred()) - - return currentImageVersion >= defaultImageVersion -} - // assertCreateTableWithDataOnSourceCluster will create on the source Cluster, as postgres superUser: // 1. a new user `micro` // 2. 
a new table with 2 records owned by `micro` in the `app` database @@ -194,9 +202,8 @@ func assertCreateTableWithDataOnSourceCluster( ) { By("create user, insert record in new table, assign new user as owner "+ "and grant read only to app user", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 query := fmt.Sprintf( "DROP USER IF EXISTS micro; "+ @@ -206,12 +213,14 @@ func assertCreateTableWithDataOnSourceCluster( "GRANT SELECT ON %[1]v TO app;", tableName) - _, _, err = env.ExecCommand( - env.Ctx, - *pod, - specs.PostgresContainerName, - &commandTimeout, - "psql", "-U", "postgres", "app", "-tAc", query) + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.AppDBName, + query) Expect(err).ToNot(HaveOccurred()) }) } @@ -223,38 +232,30 @@ func assertTableAndDataOnImportedCluster( importedClusterName string, ) { By("verifying presence of table and data from source in imported cluster", func() { - pod, err := env.GetClusterPrimary(namespace, importedClusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, importedClusterName) Expect(err).ToNot(HaveOccurred()) By("Verifying imported table has owner app user", func() { queryImported := fmt.Sprintf( - "select * from pg_tables where tablename = '%v' and tableowner = '%v'", + "select * from pg_catalog.pg_tables where tablename = '%v' and tableowner = '%v'", tableName, - testsUtils.AppUser, - ) - out, _, err := env.ExecCommandWithPsqlClient( - namespace, - importedClusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - queryImported, + postgres.AppUser, ) + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.AppDBName, + queryImported) Expect(err).ToNot(HaveOccurred()) Expect(strings.Contains(out, tableName), err).Should(BeTrue()) }) By("verifying the user named 'micro' on source is not in imported database", func() { - outUser, _, err := env.ExecCommandWithPsqlClient( - namespace, - importedClusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - "\\du", - ) - Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(outUser, "micro"), err).Should(BeFalse()) + Eventually(QueryMatchExpectationPredicate(pod, postgres.PostgresDBName, + roleExistsQuery("micro"), "f"), 30).Should(Succeed()) }) }) } @@ -271,24 +272,25 @@ func assertImportRenamesSelectedDatabase( ) { dbList := []string{"db1", "db2", "db3"} dbToImport := dbList[1] - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 By("creating multiple dbs on source and set ownership to app", func() { for _, db := range dbList { // Create database createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER app", db) - _, _, err = env.ExecCommand( - env.Ctx, - *primaryPod, - specs.PostgresContainerName, - 
&commandTimeout, - "psql", "-U", "postgres", "-tAc", createDBQuery) + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.PostgresDBName, + createDBQuery) Expect(err).ToNot(HaveOccurred()) } }) @@ -296,49 +298,45 @@ func assertImportRenamesSelectedDatabase( By(fmt.Sprintf("creating table '%s' and insert records on selected db %v", tableName, dbToImport), func() { // create a table with two records query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", tableName) - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - primaryPod, - apiv1.ApplicationUserSecretSuffix, - dbToImport, - query, - ) + _, err = postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, dbToImport, + apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) var importedCluster *apiv1.Cluster By("importing Database with microservice approach in a new cluster", func() { - importedCluster, err = testsUtils.ImportDatabaseMicroservice(namespace, clusterName, - importedClusterName, imageName, dbToImport, env) + importedCluster, err = importdb.ImportDatabaseMicroservice(env.Ctx, env.Client, namespace, clusterName, + importedClusterName, imageName, dbToImport) Expect(err).ToNot(HaveOccurred()) // We give more time than the usual 600s, since the recovery is slower AssertClusterIsReady(namespace, importedClusterName, 1000, env) - AssertClusterStandbysAreStreaming(namespace, importedClusterName, 120) + AssertClusterStandbysAreStreaming(namespace, importedClusterName, 140) }) - AssertDataExpectedCount(namespace, importedClusterName, tableName, 2, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: importedClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) By("verifying that only 'app' DB exists in the imported cluster", func() { - importedPrimaryPod, err := env.GetClusterPrimary(namespace, importedClusterName) + importedPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, importedClusterName) Expect(err).ToNot(HaveOccurred()) - out, _, err := env.ExecCommandWithPsqlClient( - namespace, - importedClusterName, - importedPrimaryPod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - "\\l", - ) - Expect(err).ToNot(HaveOccurred(), err) - Expect(strings.Contains(out, "db2"), err).Should(BeFalse()) - Expect(strings.Contains(out, "app"), err).Should(BeTrue()) + + Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, postgres.PostgresDBName, + roleExistsQuery("db2"), "f"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, postgres.PostgresDBName, + roleExistsQuery("app"), "t"), 30).Should(Succeed()) }) By("cleaning up the clusters", func() { err = DeleteResourcesFromFile(namespace, sampleFile) Expect(err).ToNot(HaveOccurred()) - Expect(testsUtils.DeleteObject(env, importedCluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, importedCluster)).To(Succeed()) }) } diff --git a/tests/e2e/cluster_monolithic_test.go b/tests/e2e/cluster_monolithic_test.go index e2dd3ad181..e12a7407ca 100644 --- a/tests/e2e/cluster_monolithic_test.go +++ b/tests/e2e/cluster_monolithic_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as 
+CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,18 +13,25 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( + "database/sql" "fmt" "os" - "strings" + + "github.com/lib/pq" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -49,9 +57,9 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD databaseTwo = "db2" ) - var namespace, sourceClusterName, sourceClusterHost, - sourceClusterSuperUser, sourceClusterPass, - targetClusterHost, targetClusterSuperUser, targetClusterPass string + var namespace, sourceClusterName string + var forwardTarget *postgres.PSQLForwardConnection + var connTarget *sql.DB BeforeEach(func() { if testLevelEnv.Depth < int(level) { @@ -66,48 +74,41 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD By("creating the source cluster", func() { const namespacePrefix = "cluster-monolith" - sourceClusterName, err = env.GetResourceNameFromYAML(sourceClusterFile) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, sourceClusterName, sourceClusterFile, env) }) - By("creating several roles, one of them a superuser", func() { + By("creating several roles, one of them a superuser and source databases", func() { + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + sourceClusterName, + postgres.PostgresDBName, + apiv1.SuperUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + // create 1st user with superuser role createSuperUserQuery := fmt.Sprintf("create user %v with superuser password '123';", databaseSuperUser) - sourceClusterHost, err = testsUtils.GetHostName(namespace, sourceClusterName, env) - Expect(err).ToNot(HaveOccurred()) - sourceClusterSuperUser, sourceClusterPass, err = testsUtils.GetCredentials( - sourceClusterName, namespace, apiv1.SuperUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.RunQueryFromPod( - psqlClientPod, - sourceClusterHost, - testsUtils.PostgresDBName, - sourceClusterSuperUser, - sourceClusterPass, - createSuperUserQuery, - env, - ) + _, err = conn.Exec(createSuperUserQuery) Expect(err).ToNot(HaveOccurred()) // create 2nd user createUserQuery := fmt.Sprintf("create user %v;", databaseUserTwo) - _, _, err = testsUtils.RunQueryFromPod( - psqlClientPod, - sourceClusterHost, - testsUtils.PostgresDBName, - 
sourceClusterSuperUser, - sourceClusterPass, - createUserQuery, - env, - ) + _, err = conn.Exec(createUserQuery) Expect(err).ToNot(HaveOccurred()) - }) - By("creating the source databases", func() { queries := []string{ fmt.Sprintf("create database %v;", databaseOne), fmt.Sprintf("alter database %v owner to %v;", databaseOne, databaseSuperUser), @@ -116,30 +117,22 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD } for _, query := range queries { - _, _, err = testsUtils.RunQueryFromPod( - psqlClientPod, - sourceClusterHost, - testsUtils.PostgresDBName, - sourceClusterSuperUser, - sourceClusterPass, - query, - env, - ) + _, err := conn.Exec(query) Expect(err).ToNot(HaveOccurred()) } // create test data and insert some records in both databases for _, database := range sourceDatabases { - query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName) - _, _, err = testsUtils.RunQueryFromPod( - psqlClientPod, - sourceClusterHost, - database, - sourceClusterSuperUser, - sourceClusterPass, - query, - env, - ) + query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", tableName) + conn, err := forward.GetPooler().Connection(database) + Expect(err).ToNot(HaveOccurred()) + // We need to set the max idle connection back to a higher number + // otherwise the conn.Exec() will close the connection + // and that will produce a RST packet from PostgreSQL that will kill the + // port-forward tunnel + // More about the RST packet here https://www.postgresql.org/message-id/165ba87e-fa48-4eae-b1f3-f9a831b4890b%40Spark + conn.SetMaxIdleConns(3) + _, err = conn.Exec(query) Expect(err).ToNot(HaveOccurred()) } }) @@ -147,73 +140,97 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD By("creating target cluster", func() { postgresImage := os.Getenv("POSTGRES_IMG") Expect(postgresImage).ShouldNot(BeEmpty(), "POSTGRES_IMG env should not be empty") - expectedImageName, err := testsUtils.BumpPostgresImageMajorVersion(postgresImage) + expectedImageName, err := postgres.BumpPostgresImageMajorVersion(postgresImage) Expect(err).ToNot(HaveOccurred()) Expect(expectedImageName).ShouldNot(BeEmpty(), "imageName could not be empty") - _, err = testsUtils.ImportDatabasesMonolith(namespace, + + _, err = importdb.ImportDatabasesMonolith( + env.Ctx, + env.Client, + namespace, sourceClusterName, targetClusterName, expectedImageName, sourceDatabases, sourceRoles, - env) + ) + Expect(err).ToNot(HaveOccurred()) + AssertClusterIsReady(namespace, targetClusterName, testTimeouts[timeouts.ClusterIsReady], env) + }) + + By("connect to the imported cluster", func() { + forwardTarget, connTarget, err = postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + targetClusterName, + postgres.PostgresDBName, + apiv1.SuperUserSecretSuffix, + ) Expect(err).ToNot(HaveOccurred()) - AssertClusterIsReady(namespace, targetClusterName, testTimeouts[testsUtils.ClusterIsReady], env) }) By("verifying that the specified source databases were imported", func() { - targetClusterHost, err = testsUtils.GetHostName(namespace, targetClusterName, env) + stmt, err := connTarget.Prepare("SELECT datname FROM pg_catalog.pg_database WHERE datname IN ($1)") Expect(err).ToNot(HaveOccurred()) - targetClusterSuperUser, targetClusterPass, err = testsUtils.GetCredentials( - targetClusterName, namespace, apiv1.SuperUserSecretSuffix, env) + rows, err := stmt.QueryContext(env.Ctx, pq.Array(sourceDatabases)) 
Expect(err).ToNot(HaveOccurred()) - for _, database := range sourceDatabases { - databaseEntryQuery := fmt.Sprintf("SELECT datname FROM pg_database where datname='%v'", database) - stdOut, _, err := testsUtils.RunQueryFromPod( - psqlClientPod, - targetClusterHost, - testsUtils.PostgresDBName, - targetClusterSuperUser, - targetClusterPass, - databaseEntryQuery, - env, - ) + var datName string + for rows.Next() { + err = rows.Scan(&datName) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(stdOut, database)).Should(BeTrue()) + Expect(sourceDatabases).Should(ContainElement(datName)) } }) + By("verifying that no extra application database or owner were created", func() { + stmt, err := connTarget.Prepare("SELECT count(*) FROM pg_catalog.pg_database WHERE datname = $1") + Expect(err).ToNot(HaveOccurred()) + var matchCount int + err = stmt.QueryRowContext(env.Ctx, "app").Scan(&matchCount) + Expect(err).ToNot(HaveOccurred()) + Expect(matchCount).To(BeZero(), "app database should not exist") + stmt, err = connTarget.Prepare("SELECT count(*) from pg_catalog.pg_user WHERE usename = $1") + Expect(err).ToNot(HaveOccurred()) + err = stmt.QueryRowContext(env.Ctx, "app").Scan(&matchCount) + Expect(err).ToNot(HaveOccurred()) + Expect(matchCount).To(BeZero(), "app user should not exist") + }) + By(fmt.Sprintf("verifying that the source superuser '%s' became a normal user in target", databaseSuperUser), func() { - getSuperUserQuery := "select * from pg_user where usesuper" - stdOut, _, err := testsUtils.RunQueryFromPod( - psqlClientPod, - targetClusterHost, - testsUtils.PostgresDBName, - targetClusterSuperUser, - targetClusterPass, - getSuperUserQuery, - env, - ) + row := connTarget.QueryRow(fmt.Sprintf( + "SELECT usesuper FROM pg_catalog.pg_user WHERE usename='%s'", + databaseSuperUser)) + var superUser bool + err := row.Scan(&superUser) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(stdOut, databaseSuperUser)).Should(BeFalse()) + Expect(superUser).Should(BeFalse()) }) By("verifying the test data was imported from the source databases", func() { for _, database := range sourceDatabases { - selectQuery := fmt.Sprintf("select count(*) from %v", tableName) - stdOut, _, err := testsUtils.RunQueryFromPod( - psqlClientPod, - targetClusterHost, - database, - targetClusterSuperUser, - targetClusterPass, - selectQuery, - env, - ) + selectQuery := fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName) + connTemp, err := forwardTarget.GetPooler().Connection(database) Expect(err).ToNot(HaveOccurred()) - Expect(strings.TrimSpace(stdOut)).Should(BeEquivalentTo("2")) + // We need to set the max idle connection back to a higher number + // otherwise the conn.Exec() will close the connection + // and that will produce a RST packet from PostgreSQL that will kill the + // port-forward tunnel + // More about the RST packet here https://www.postgresql.org/message-id/165ba87e-fa48-4eae-b1f3-f9a831b4890b%40Spark + connTemp.SetMaxIdleConns(3) + row := connTemp.QueryRow(selectQuery) + var count int + err = row.Scan(&count) + Expect(err).ToNot(HaveOccurred()) + Expect(count).To(BeEquivalentTo(2)) } }) + + By("close connection to imported and the source cluster", func() { + forwardTarget.Close() + }) }) }) diff --git a/tests/e2e/cluster_setup_test.go b/tests/e2e/cluster_setup_test.go index e3f6eabfaf..cf6672e3e4 100644 --- a/tests/e2e/cluster_setup_test.go +++ b/tests/e2e/cluster_setup_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as 
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package e2e

@@ -27,7 +30,9 @@ import (
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
 	"github.com/cloudnative-pg/cloudnative-pg/tests"
-	testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"

 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -39,7 +44,9 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
 		clusterName = "postgresql-storage-class"
 		level       = tests.Highest
 	)
+
 	var namespace string
+
 	BeforeEach(func() {
 		if testLevelEnv.Depth < int(level) {
 			Skip("Test depth is lower than the amount requested for this test")
@@ -51,13 +58,13 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
 		var err error
 		// Create a cluster in a namespace we'll delete after the test
-		namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+		namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
 		Expect(err).ToNot(HaveOccurred())

 		AssertCreateCluster(namespace, clusterName, sampleFile, env)

 		By("having three PostgreSQL pods with status ready", func() {
-			podList, err := env.GetClusterPodList(namespace, clusterName)
+			podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
 			Expect(utils.CountReadyPods(podList.Items), err).Should(BeEquivalentTo(3))
 		})
@@ -73,23 +80,27 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
 			err := env.Client.Get(env.Ctx, namespacedName, pod)
 			Expect(err).ToNot(HaveOccurred())

-			// Put something in the database. We'll check later if it still exists
-			appUser, appUserPass, err := testsUtils.GetCredentials(
-				clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
-			Expect(err).NotTo(HaveOccurred())
-			host, err := testsUtils.GetHostName(namespace, clusterName, env)
+			forward, conn, err := postgres.ForwardPSQLConnection(
+				env.Ctx,
+				env.Client,
+				env.Interface,
+				env.RestClientConfig,
+				namespace,
+				clusterName,
+				postgres.AppDBName,
+				apiv1.ApplicationUserSecretSuffix,
+			)
 			Expect(err).NotTo(HaveOccurred())
+
 			query := "CREATE TABLE IF NOT EXISTS test (id bigserial PRIMARY KEY, t text);"
-			_, _, err = testsUtils.RunQueryFromPod(
-				psqlClientPod,
-				host,
-				testsUtils.AppDBName,
-				appUser,
-				appUserPass,
-				query,
-				env,
-			)
-			Expect(err).ToNot(HaveOccurred())
+			_, err = conn.Exec(query)
+			Expect(err).NotTo(HaveOccurred())
+
+			// Here we need to close both the connection and the port-forward; if we don't
+			// do both steps, the connection that wasn't closed stays idle and PostgreSQL
+			// will not restart in time.
+			_ = conn.Close()
+			forward.Close()

 			// We kill the pid 1 process.
// The pod should be restarted and the count of the restarts @@ -118,18 +129,26 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun return int32(-1), nil }, timeout).Should(BeEquivalentTo(restart + 1)) - Eventually(func() (bool, error) { - query = "SELECT * FROM test" - _, _, err = env.ExecCommandWithPsqlClient( - namespace, - clusterName, - psqlClientPod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) - return err == nil, err - }, timeout).Should(BeTrue()) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) + + forward, conn, err = postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).NotTo(HaveOccurred()) + + _, err = conn.Exec("SELECT * FROM test") + Expect(err).NotTo(HaveOccurred()) }) }) @@ -137,7 +156,7 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun const namespacePrefix = "cluster-conditions" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("having a %v namespace", namespace), func() { @@ -164,7 +183,7 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun // scale up the cluster to verify if the cluster remains in Ready By("scaling up the cluster size", func() { - err := env.ScaleClusterSize(namespace, clusterName, 5) + err := clusterutils.ScaleSize(env.Ctx, env.Client, namespace, clusterName, 5) Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/e2e/commons_test.go b/tests/e2e/commons_test.go index 38dc4007f8..4315e16411 100644 --- a/tests/e2e/commons_test.go +++ b/tests/e2e/commons_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,37 +13,41 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e -import "github.com/cloudnative-pg/cloudnative-pg/tests/utils" +import ( + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/cloudvendors" +) -func MustGetEnvProfile() utils.EnvProfile { - return utils.GetEnvProfile(*testCloudVendorEnv) +func MustGetEnvProfile() cloudvendors.EnvProfile { + return cloudvendors.GetEnvProfile(*testCloudVendorEnv) } // IsAKS checks if the running cluster is on AKS func IsAKS() bool { - return *testCloudVendorEnv == utils.AKS + return *testCloudVendorEnv == cloudvendors.AKS } // IsEKS checks if the running cluster is on EKS func IsEKS() bool { - return *testCloudVendorEnv == utils.EKS + return *testCloudVendorEnv == cloudvendors.EKS } // IsGKE checks if the running cluster is on GKE func IsGKE() bool { - return *testCloudVendorEnv == utils.GKE + return *testCloudVendorEnv == cloudvendors.GKE } // IsLocal checks if the running cluster is on local func IsLocal() bool { - return *testCloudVendorEnv == utils.LOCAL + return *testCloudVendorEnv == cloudvendors.LOCAL } // IsOpenshift checks if the running cluster is on OpenShift func IsOpenshift() bool { - return *testCloudVendorEnv == utils.OCP + return *testCloudVendorEnv == cloudvendors.OCP } diff --git a/tests/e2e/config_support_test.go b/tests/e2e/config_support_test.go index bf29de31e1..4d06170296 100644 --- a/tests/e2e/config_support_test.go +++ b/tests/e2e/config_support_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -21,10 +24,14 @@ import ( "github.com/onsi/ginkgo/v2/types" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -43,16 +50,47 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, level = tests.Low ) var operatorNamespace, namespace string + var initialConfigMap *corev1.ConfigMap + var initialSecret *corev1.Secret BeforeEach(func() { if testLevelEnv.Depth < int(level) { Skip("Test depth is lower than the amount requested for this test") } + }) - operatorDeployment, err := env.GetOperatorDeployment() + BeforeAll(func() { + operatorDeployment, err := operator.GetDeployment(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) - operatorNamespace = operatorDeployment.GetNamespace() + + // Save the initial configMap + initialConfigMap = &corev1.ConfigMap{} + err = env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: operatorNamespace, Name: configName}, + initialConfigMap) + if !apierrors.IsNotFound(err) { + Expect(err).ToNot(HaveOccurred()) + } + initialConfigMap.SetResourceVersion("") + initialConfigMap.SetUID("") + initialConfigMap.SetCreationTimestamp(metav1.Time{}) + initialConfigMap.SetSelfLink("") + initialConfigMap.SetGeneration(0) + initialConfigMap.SetManagedFields(nil) + + // Save the initial secret + initialSecret = &corev1.Secret{} + err = env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: operatorNamespace, Name: configName}, + initialSecret) + if !apierrors.IsNotFound(err) { + Expect(err).ToNot(HaveOccurred()) + } + initialSecret.SetResourceVersion("") + initialSecret.SetUID("") + initialSecret.SetCreationTimestamp(metav1.Time{}) + initialSecret.SetSelfLink("") + initialSecret.SetGeneration(0) + initialSecret.SetManagedFields(nil) }) AfterAll(func() { @@ -74,14 +112,24 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, err = env.Client.Delete(env.Ctx, secret) Expect(err).NotTo(HaveOccurred()) - err = utils.ReloadOperatorDeployment(env, 120) + // Create preexisting ConfigMap and Secret + if initialConfigMap.Name != "" { + err = env.Client.Create(env.Ctx, initialConfigMap) + Expect(err).ToNot(HaveOccurred()) + } + if initialSecret.Name != "" { + err = env.Client.Create(env.Ctx, initialSecret) + Expect(err).ToNot(HaveOccurred()) + } + + err = operator.ReloadDeployment(env.Ctx, env.Client, 120) Expect(err).ToNot(HaveOccurred()) }) It("creates the configuration map and secret", func() { // create a config map where operator is deployed cmd := fmt.Sprintf("kubectl apply -n %v -f %v", operatorNamespace, configMapFile) - _, _, err := utils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) // Check if configmap is created Eventually(func() ([]corev1.ConfigMap, error) { @@ -95,7 +143,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, // create a secret where operator is deployed cmd = fmt.Sprintf("kubectl apply -n %v -f %v", operatorNamespace, secretFile) - _, _, err = utils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) // Check if configmap is created Eventually(func() ([]corev1.Secret, error) { @@ -108,30 +156,31 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, }, 10).Should(HaveLen(1)) // Reload the operator with the new config - err = utils.ReloadOperatorDeployment(env, 120) + err = operator.ReloadDeployment(env.Ctx, env.Client, 120) Expect(err).ToNot(HaveOccurred()) }) It("creates a cluster", func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) 
Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterWithInheritedLabelsFile, env) }) It("verify label's and annotation's inheritance when global config-map changed", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) By("checking the cluster has the requested labels", func() { expectedLabels := map[string]string{"environment": "qaEnv"} - Expect(utils.ClusterHasLabels(cluster, expectedLabels)).To(BeTrue()) + Expect(clusterutils.HasLabels(cluster, expectedLabels)).To(BeTrue()) }) By("checking the pods inherit labels matching the ones in the configuration secret", func() { expectedLabels := map[string]string{"environment": "qaEnv"} Eventually(func() (bool, error) { - return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedLabels) + return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName, + expectedLabels) }, 180).Should(BeTrue()) }) By("checking the pods inherit labels matching wildcard ones in the configuration secret", func() { @@ -140,17 +189,19 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, "example.com/prod": "prod", } Eventually(func() (bool, error) { - return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedLabels) + return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName, + expectedLabels) }, 180).Should(BeTrue()) }) By("checking the cluster has the requested annotation", func() { expectedAnnotations := map[string]string{"categories": "DatabaseApplication"} - Expect(utils.ClusterHasAnnotations(cluster, expectedAnnotations)).To(BeTrue()) + Expect(clusterutils.HasAnnotations(cluster, expectedAnnotations)).To(BeTrue()) }) By("checking the pods inherit annotations matching the ones in the configuration configMap", func() { expectedAnnotations := map[string]string{"categories": "DatabaseApplication"} Eventually(func() (bool, error) { - return utils.AllClusterPodsHaveAnnotations(env, namespace, clusterName, expectedAnnotations) + return clusterutils.AllPodsHaveAnnotations(env.Ctx, env.Client, namespace, clusterName, + expectedAnnotations) }, 180).Should(BeTrue()) }) By("checking the pods inherit annotations matching wildcard ones in the configuration configMap", func() { @@ -159,7 +210,8 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, "example.com/prod": "prod", } Eventually(func() (bool, error) { - return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedAnnotations) + return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName, + expectedAnnotations) }, 180).Should(BeTrue()) }) }) @@ -167,7 +219,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, // Setting MONITORING_QUERIES_CONFIGMAP: "" should disable monitoring // queries on new cluster. We expect those metrics to be missing. 
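// (MONITORING_QUERIES_CONFIGMAP names the ConfigMap holding the operator's default monitoring queries; with it set to the empty string, clusters reconciled afterwards get no default metrics queries.)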
It("verify metrics details when updated default monitoring configMap queries parameter is set to be empty", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) collectAndAssertDefaultMetricsPresentOnEachPod(namespace, clusterName, cluster.IsMetricsTLSEnabled(), false) diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go index 28e921a492..61622098f4 100644 --- a/tests/e2e/configuration_update_test.go +++ b/tests/e2e/configuration_update_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -31,9 +34,11 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - devUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -66,7 +71,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada cluster := &apiv1.Cluster{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Parameters = paramsMap return env.Client.Update(env.Ctx, cluster) @@ -78,7 +83,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada cluster := &apiv1.Cluster{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.PgHBA = []string{"host all all all trust"} return env.Client.Update(env.Ctx, cluster) @@ -90,7 +95,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada cluster := &apiv1.Cluster{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.PgIdent = []string{"email /^(.*)@example\\.com \\1"} return env.Client.Update(env.Ctx, cluster) @@ -103,22 +108,28 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada cluster := &apiv1.Cluster{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) cluster.Spec.PostgresConfiguration.Parameters = params return env.Client.Update(env.Ctx, cluster) }) Expect(apierrors.IsInvalid(err)).To(BeTrue()) - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Expect other config parameters applied together with a blockedParameter to not have changed for idx := range podList.Items { pod := podList.Items[idx] Eventually(func(g Gomega) int { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show autovacuum_max_workers") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show autovacuum_max_workers") g.Expect(err).ToNot(HaveOccurred()) value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) @@ -136,7 +147,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("create cluster with default configuration", func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) @@ -144,21 +155,25 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada It("01. 
reloading Pg when a parameter requiring reload is modified", func() { // max_connection increase to 110 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("apply configuration update", func() { - // Update the configuration updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, 300) }) By("verify that work_mem result as expected", func() { // Check that the parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show work_mem") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(8)) @@ -170,41 +185,40 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada endpointName := clusterName + "-rw" // Connection should fail now because we are not supplying a password - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - By("verify that connection should failed by default", func() { - _, _, err := devUtils.ExecCommand( - env.Ctx, - env.Interface, - env.RestClientConfig, - podList.Items[0], - specs.PostgresContainerName, - &commandTimeout, + By("verify that connections fail by default", func() { + _, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, podList.Items[0], + specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "-h", endpointName, "-tAc", "select 1", ) Expect(err).To(HaveOccurred()) }) By("apply configuration update", func() { - // Update the configuration updateClusterPostgresPgHBA(namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, 300) }) - By("verify that connection should success after pg_hba_reload", func() { + By("verify that connections succeed after pg_hba_reload", func() { // The new pg_hba rule should be present in every pod + query := "select count(*) from pg_catalog.pg_hba_file_rules where type = 'host' and auth_method = 'trust'" for _, pod := range podList.Items { Eventually(func() (string, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select count(*) from pg_hba_file_rules where type = 'host' and auth_method = 'trust'") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + query) return strings.Trim(stdout, "\n"), err }, timeout).Should(BeEquivalentTo("1")) } // The connection should work now Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, podList.Items[0], + stdout, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, podList.Items[0], specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "-h", endpointName, "-tAc", "select 1") value, atoiErr := 
strconv.Atoi(strings.Trim(stdout, "\n")) @@ -216,26 +230,30 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada It("03. restarting and switching Pg when a parameter requiring restart is modified", func() { timeout := 300 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary By("apply configuration update", func() { - // Update the configuration postgresParams["shared_buffers"] = "256MB" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, timeout) }) By("verify that shared_buffers setting changed", func() { // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show shared_buffers") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show shared_buffers") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(256), @@ -245,7 +263,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that a switchover happened", func() { // Check that a switchover happened Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, timeout).ShouldNot(BeEquivalentTo(oldPrimary)) }) @@ -253,34 +271,44 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada It("04. 
restarting and switching Pg when mixed parameters are modified", func() { timeout := 300 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary By("apply configuration update", func() { - // Update the configuration postgresParams["max_replication_slots"] = "16" postgresParams["maintenance_work_mem"] = "128MB" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, timeout) }) By("verify that both parameters have been modified in each pod", func() { // Check that both parameters have been modified in each pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_replication_slots") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show max_replication_slots") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(16)) Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show maintenance_work_mem") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show maintenance_work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(128)) @@ -290,7 +318,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that a switchover happened", func() { // Check that a switchover happened Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, timeout).ShouldNot(BeEquivalentTo(oldPrimary)) }) @@ -313,10 +341,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada func() { // max_connection decrease to 105 timeout := 300 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary @@ -325,15 +353,20 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada delete(postgresParams, "port") postgresParams["max_connections"] = "105" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, timeout) }) By("verify 
that max_connections has been decreased in every pod", func() { // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_connections") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(105), @@ -343,7 +376,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that a switchover not happened", func() { // Check that a switchover did not happen Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, timeout).Should(BeEquivalentTo(oldPrimary)) }) @@ -355,26 +388,30 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada func() { timeout := 300 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary By("apply configuration update", func() { - // Update the configuration delete(postgresParams, "max_connections") updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, timeout) }) By("verify that the max_connections has been set to default in every pod", func() { // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_connections") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(100), @@ -384,59 +421,51 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that a switchover not happened", func() { // Check that a switchover did not happen Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, timeout).Should(BeEquivalentTo(oldPrimary)) }) }) + // pg_ident_file_mappings is available from v15 only It("09. 
reloading Pg when pg_ident rules are modified", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - stdout, _, err := env.ExecCommand(env.Ctx, podList.Items[0], specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select count(1) from pg_views where viewname = 'pg_ident_file_mappings';") - psqlHasIdentView := err == nil && strings.Trim(stdout, "\n") == "1" + if env.PostgresVersion > 14 { + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + query := "select count(1) from pg_catalog.pg_ident_file_mappings;" - By("check that there is only one entry in pg_ident_file_mappings", func() { - for _, pod := range podList.Items { - if psqlHasIdentView { - Eventually(func() (string, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select count(1) from pg_ident_file_mappings;") - return strings.Trim(stdout, "\n"), err - }, timeout).Should(BeEquivalentTo("1")) - } - } - }) + By("check that there is the expected number of entries in pg_ident_file_mappings", func() { + Eventually(func() (string, error) { + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.PostgresDBName, + query) + return strings.Trim(stdout, "\n"), err + }, timeout).Should(BeEquivalentTo("3")) + }) - By("apply configuration update", func() { - // Update the configuration - updateClusterPostgresPgIdent(namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, 300) - }) + By("apply configuration update", func() { + updateClusterPostgresPgIdent(namespace) + }) - By("verify that there are now two entries in pg_ident_file_mappings", func() { - for _, pod := range podList.Items { - if psqlHasIdentView { - Eventually(func() (string, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select count(1) from pg_ident_file_mappings;") - return strings.Trim(stdout, "\n"), err - }, timeout).Should(BeEquivalentTo("2")) - } else { - // Can't check for the actual content of the file, but let's check that we can reload the config - Eventually(func() (string, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "select count(1) where pg_reload_conf();") - return strings.Trim(stdout, "\n"), err - }, timeout).Should(BeEquivalentTo("1")) - } - } - }) + By("verify that there is one more entry in pg_ident_file_mappings", func() { + Eventually(func() (string, error) { + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.PostgresDBName, + query) + return strings.Trim(stdout, "\n"), err + }, timeout).Should(BeEquivalentTo("4")) + }) + } }) }) @@ -458,10 +487,10 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La const namespacePrefix = "config-change-primary-update-restart" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, 
env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(clusterFileWithPrimaryUpdateRestart) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterFileWithPrimaryUpdateRestart) Expect(err).ToNot(HaveOccurred()) By("setting up cluster with primaryUpdateMethod value set to restart", func() { @@ -478,47 +507,50 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La var primaryStartTime time.Time By("getting old primary info", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPrimaryPodName = primaryPodInfo.GetName() - appUser, appUserPass, err := utils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) Expect(err).ToNot(HaveOccurred()) - host, err := utils.GetHostName(namespace, clusterName, env) + defer func() { + // Here we need to close the connection and then close the forward: if we don't do both steps, + // the PostgreSQL connection will stay there idle and PostgreSQL will not restart in time + // because of the connection that wasn't closed + _ = conn.Close() + forward.Close() + }() + + query := "SELECT TO_CHAR(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');" + var startTime string + row := conn.QueryRow(query) + err = row.Scan(&startTime) Expect(err).ToNot(HaveOccurred()) - query := "select to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');" - stdout, _, cmdErr := utils.RunQueryFromPod( - psqlClientPod, - host, - utils.AppDBName, - appUser, - appUserPass, - query, - env) - Expect(cmdErr).ToNot(HaveOccurred()) - primaryStartTime, err = cnpgTypes.ParseTargetTime(nil, strings.Trim(stdout, "\n")) + primaryStartTime, err = cnpgTypes.ParseTargetTime(nil, startTime) Expect(err).NotTo(HaveOccurred()) - query = "show max_connections" - stdout, _, cmdErr = utils.RunQueryFromPod( - psqlClientPod, - host, - utils.AppDBName, - appUser, - appUserPass, - query, - env) - Expect(cmdErr).ToNot(HaveOccurred()) - v, err := strconv.Atoi(strings.Trim(stdout, "\n")) - Expect(err).NotTo(HaveOccurred()) + query = "show max_connections" + row = conn.QueryRow(query) + var maxConnections int + err = row.Scan(&maxConnections) + Expect(err).ToNot(HaveOccurred()) - newMaxConnectionsValue = v + 10 + newMaxConnectionsValue = maxConnections + 10 }) By(fmt.Sprintf("updating max_connection value to %v", newMaxConnectionsValue), func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -529,14 +561,19 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La }) By("verifying the new value for max_connections is updated for all instances", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - 
"psql", "-U", "postgres", "-tAc", "show max_connections") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, 180).Should(BeEquivalentTo(newMaxConnectionsValue), @@ -546,13 +583,12 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La By("verifying the old primary is still the primary", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, 60).Should(BeEquivalentTo(oldPrimaryPodName)) }) By("verifying that old primary was actually restarted", func() { - commandTimeout := time.Second * 10 pod := corev1.Pod{} err := env.Client.Get(env.Ctx, types.NamespacedName{ Namespace: namespace, @@ -561,9 +597,17 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La Expect(err).ToNot(HaveOccurred()) // take pg postmaster start time - stdout, _, cmdErr := env.EventuallyExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');") + query := "select to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');" + stdout, _, cmdErr := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, postgres.PostgresDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(cmdErr).ToNot(HaveOccurred()) newStartTime, err := cnpgTypes.ParseTargetTime(nil, strings.Trim(stdout, "\n")) @@ -576,10 +620,9 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La It("work_mem config change should not require a restart", func() { const expectedNewValueForWorkMem = "10MB" - commandTimeout := time.Second * 10 By("updating work mem ", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -589,20 +632,25 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La }) By("verify that work_mem result as expected", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Check that the parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show work_mem") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, 160).Should(BeEquivalentTo(10)) } }) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, 120) }) }) }) diff --git a/tests/e2e/connection_test.go b/tests/e2e/connection_test.go index 
34c2df06e7..8eb28dd75c 100644 --- a/tests/e2e/connection_test.go +++ b/tests/e2e/connection_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -23,7 +26,8 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -51,20 +55,19 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity appDBUser string, appPassword string, superuserPassword string, - env *utils.TestingEnvironment, + env *environment.TestingEnvironment, ) { // We test -rw, -ro and -r services with the app user and the superuser - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) - rService := fmt.Sprintf("%v-r.%v.svc", clusterName, namespace) - roService := fmt.Sprintf("%v-ro.%v.svc", clusterName, namespace) + rwService := fmt.Sprintf("%v-rw", clusterName) + rService := fmt.Sprintf("%v-r", clusterName) + roService := fmt.Sprintf("%v-ro", clusterName) services := []string{rwService, roService, rService} for _, service := range services { - AssertConnection(service, "postgres", appDBName, superuserPassword, *psqlClientPod, 10, env) - AssertConnection(service, appDBUser, appDBName, appPassword, *psqlClientPod, 10, env) + AssertConnection(namespace, service, appDBName, postgres.PostgresDBName, superuserPassword, env) } - AssertWritesToReplicaFails(psqlClientPod, roService, appDBName, appDBUser, appPassword) - AssertWritesToPrimarySucceeds(psqlClientPod, rwService, appDBName, appDBUser, appPassword) + AssertWritesToReplicaFails(namespace, roService, appDBName, appDBUser, appPassword) + AssertWritesToPrimarySucceeds(namespace, rwService, appDBName, appDBUser, appPassword) } Context("Auto-generated passwords", func() { @@ -78,7 +81,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity It("can connect with auto-generated passwords", func() { // Create a cluster in a namespace we'll delete after the test var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -123,7 +126,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity // Create a cluster in a namespace we'll delete after the test var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) AssertServices(namespace, clusterName, appDBName, appDBUser, diff --git a/tests/e2e/declarative_database_management_test.go 
b/tests/e2e/declarative_database_management_test.go index ee9f59942a..9ccebdf60d 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,18 +13,26 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( + "fmt" "time" "k8s.io/apimachinery/pkg/types" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -32,11 +41,11 @@ import ( // - spinning up a cluster, apply a declarative database on it // Set of tests in which we use the declarative database CRD to add new databases on an existing cluster -var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke, tests.LabelBasic), func() { +var _ = Describe("Declarative database management", Label(tests.LabelSmoke, tests.LabelBasic, + tests.LabelDeclarativeDatabases), func() { const ( - clusterManifest = fixturesDir + "/declarative_databases/cluster.yaml.template" - databaseManifest = fixturesDir + "/declarative_databases/database.yaml.template" - level = tests.Medium + clusterManifest = fixturesDir + "/declarative_databases/cluster.yaml.template" + level = tests.Medium ) BeforeEach(func() { @@ -45,24 +54,22 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke } }) - Context("plain vanilla cluster", Ordered, func() { + Context("in a plain vanilla cluster", Ordered, func() { const ( namespacePrefix = "declarative-db" - databaseCrdName = "db-declarative" dbname = "declarative" ) var ( clusterName, namespace string - database *apiv1.Database + err error ) BeforeAll(func() { - var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(clusterManifest) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) By("setting up cluster and declarative database CRD", func() { @@ -70,65 +77,168 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke }) }) - assertDatabaseExists := func(namespace, primaryPod, dbname string, shouldContain bool) { + assertDatabaseHasExpectedFields := func(namespace, primaryPod string, db apiv1.Database) { + query := fmt.Sprintf("select count(*) from pg_catalog.pg_database where datname = '%s' "+ + 
"and encoding = pg_char_to_encoding('%s') and datctype = '%s' and datcollate = '%s'", + db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate) Eventually(func(g Gomega) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod, }, "postgres", - "\\l") + query) g.Expect(err).ToNot(HaveOccurred()) - if shouldContain { - g.Expect(stdout).Should(ContainSubstring(dbname)) - } else { - g.Expect(stdout).ShouldNot(ContainSubstring(dbname)) + g.Expect(stdout).Should(ContainSubstring("1"), "expected database not found") + }, 30).Should(Succeed()) + } + + assertTestDeclarativeDatabase := func( + databaseManifest string, + retainOnDeletion bool, + ) { + var ( + database apiv1.Database + databaseObjectName string + ) + By("applying Database CRD manifest", func() { + CreateResourceFromFile(namespace, databaseManifest) + databaseObjectName, err = yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest) + Expect(err).NotTo(HaveOccurred()) + }) + By("ensuring the Database CRD succeeded reconciliation", func() { + // get database object + database = apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseObjectName, + } + + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, databaseNamespacedName, &database) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + g.Expect(database.Status.Message).Should(BeEmpty()) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By("verifying new database has been created with the expected fields", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName, + databaseExistsQuery(dbname), "t"), 30).Should(Succeed()) + + assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, database) + }) + + By("verifying the extension presence in the target database", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + for _, extSpec := range database.Spec.Extensions { + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, exec.DatabaseName(database.Spec.Name), + extensionExistsQuery(extSpec.Name), boolPGOutput(true)), 30).Should(Succeed()) + } + }) + + By("verifying the schema presence in the target database", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + for _, schemaSpec := range database.Spec.Schemas { + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, exec.DatabaseName(database.Spec.Name), + schemaExistsQuery(schemaSpec.Name), boolPGOutput(true)), 30).Should(Succeed()) + } + }) + + By("verifying the fdw presence in the target database", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + for _, fdwSpec := range database.Spec.FDWs { + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, exec.DatabaseName(database.Spec.Name), + fdwExistsQuery(fdwSpec.Name), boolPGOutput(true)), 30).Should(Succeed()) } - }, 300).Should(Succeed()) + }) + + By("removing the Database object", func() { + 
Expect(objects.Delete(env.Ctx, env.Client, &database)).To(Succeed()) + }) + + By("verifying the retention policy in the postgres database", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName, + databaseExistsQuery(dbname), boolPGOutput(retainOnDeletion)), 30).Should(Succeed()) + }) } - When("Database CRD reclaim policy is set to retain (default) inside spec", func() { - It("can add a declarative database", func() { - By("applying Database CRD manifest", func() { - CreateResourceFromFile(namespace, databaseManifest) - _, err := env.GetResourceNameFromYAML(databaseManifest) - Expect(err).NotTo(HaveOccurred()) - }) - By("ensuring the Database CRD succeeded reconciliation", func() { - // get database object - database = &apiv1.Database{} - databaseNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: databaseCrdName, - } - - Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, databaseNamespacedName, database) - Expect(err).ToNot(HaveOccurred()) - g.Expect(database.Status.Ready).Should(BeTrue()) - }, 300).WithPolling(10 * time.Second).Should(Succeed()) - }) - - By("verifying new database has been added", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - assertDatabaseExists(namespace, primaryPodInfo.Name, dbname, true) - }) + When("Database CRD reclaim policy is set to delete", func() { + It("can manage a declarative database and delete it in Postgres", func() { + databaseManifest := fixturesDir + + "/declarative_databases/database-with-delete-reclaim-policy.yaml.template" + assertTestDeclarativeDatabase(databaseManifest, + false) }) + }) - It("keeps the db when Database CRD is removed", func() { - By("remove Database CRD", func() { - Expect(utils.DeleteObject(env, database)).To(Succeed()) - }) + When("Database CRD reclaim policy is set to retain", func() { + It("can manage a declarative database and release it", func() { + databaseManifest := fixturesDir + "/declarative_databases/database.yaml.template" + assertTestDeclarativeDatabase(databaseManifest, true) + }) + }) + }) - By("verifying database is still existing", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + Context("in a Namespace to be deleted manually", func() { + const ( + namespace = "declarative-db-finalizers" + ) + var ( + err error + clusterName string + databaseObjectName string + ) + It("will not prevent the deletion of the namespace with lagging finalizers", func() { + By("setting up the new namespace and cluster", func() { + err = namespaces.CreateNamespace(env.Ctx, env.Client, namespace) + Expect(err).ToNot(HaveOccurred()) + + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) + Expect(err).ToNot(HaveOccurred()) - assertDatabaseExists(namespace, primaryPodInfo.Name, dbname, true) - }) + AssertCreateCluster(namespace, clusterName, clusterManifest, env) + }) + By("creating the database", func() { + databaseManifest := fixturesDir + + "/declarative_databases/database-with-delete-reclaim-policy.yaml.template" + databaseObjectName, err = yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest) + Expect(err).NotTo(HaveOccurred()) + CreateResourceFromFile(namespace, databaseManifest) + }) + By("ensuring the database is reconciled successfully", func() { + // get 
database object + dbObj := &apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseObjectName, + } + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, databaseNamespacedName, dbObj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(dbObj.Status.Applied).Should(HaveValue(BeTrue())) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + By("deleting the namespace and making sure it succeeds before timeout", func() { + err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, namespace, 120) + Expect(err).ToNot(HaveOccurred()) + // we need to clean up the testing logs ad hoc since we are not using a testingNamespace for this test + err = namespaces.CleanupClusterLogs(namespace, CurrentSpecReport().Failed()) + Expect(err).ToNot(HaveOccurred()) }) }) }) diff --git a/tests/e2e/declarative_hibernation_test.go b/tests/e2e/declarative_hibernation_test.go index 0f1b164af7..b8eb7997ae 100644 --- a/tests/e2e/declarative_hibernation_test.go +++ b/tests/e2e/declarative_hibernation_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -24,6 +27,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -46,20 +52,26 @@ var _ = Describe("Cluster declarative hibernation", func() { It("hibernates an existing cluster", func(ctx SpecContext) { const namespacePrefix = "declarative-hibernation" - clusterName, err := env.GetResourceNameFromYAML(sampleFileCluster) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileCluster) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating a new cluster", func() { AssertCreateCluster(namespace, clusterName, sampleFileCluster, env) // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) }) By("hibernating the new cluster", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) if cluster.Annotations == nil { cluster.Annotations = make(map[string]string) @@ -72,19 +84,20 @@ var _ = Describe("Cluster declarative hibernation", func() { By("waiting for the cluster to be hibernated correctly", func() { Eventually(func(g Gomega) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(meta.IsStatusConditionTrue(cluster.Status.Conditions, hibernation.HibernationConditionType)).To(BeTrue()) + g.Expect(meta.IsStatusConditionTrue(cluster.Status.Conditions, + hibernation.HibernationConditionType)).To(BeTrue()) }, 300).Should(Succeed()) }) By("verifying that the Pods have been deleted for the cluster", func() { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items)).Should(BeEquivalentTo(0)) }) By("rehydrating the cluster", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) if cluster.Annotations == nil { cluster.Annotations = make(map[string]string) @@ -98,7 +111,7 @@ var _ = Describe("Cluster declarative hibernation", func() { By("waiting for the condition to be removed", func() { Eventually(func(g Gomega) { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) condition := meta.FindStatusCondition(cluster.Status.Conditions, hibernation.HibernationConditionType) @@ -108,13 +121,19 @@ var _ = Describe("Cluster declarative hibernation", func() { By("waiting for the Pods to be recreated", func() { Eventually(func(g Gomega) { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(len(podList.Items)).Should(BeEquivalentTo(cluster.Spec.Instances)) }, 300).Should(Succeed()) }) By("verifying the data has been preserved", func() { - AssertDataExpectedCount(namespace, clusterName, tableName, 2, psqlClientPod) + 
tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) diff --git a/tests/e2e/disk_space_test.go b/tests/e2e/disk_space_test.go index 84986cd3c1..6668943610 100644 --- a/tests/e2e/disk_space_test.go +++ b/tests/e2e/disk_space_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -28,7 +31,11 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -48,19 +55,20 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { var primaryPod *corev1.Pod By("finding cluster resources", func() { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster).ToNot(BeNil()) - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) + primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(primaryPod).ToNot(BeNil()) }) By("filling the WAL volume", func() { timeout := time.Minute * 5 - _, _, err := env.ExecCommandInInstancePod( - testsUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, }, @@ -73,35 +81,37 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { By("writing something when no space is available", func() { // Create the table used by the scenario query := "CREATE TABLE diskspace AS SELECT generate_series(1, 1000000);" - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - primaryPod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) + _, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.AppDBName, + query) Expect(err).To(HaveOccurred()) - query = "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT" - _, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + + query = "CHECKPOINT; SELECT pg_catalog.pg_switch_wal(); CHECKPOINT" + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - 
testsUtils.DatabaseName("postgres"), + postgres.PostgresDBName, query) Expect(err).To(HaveOccurred()) }) By("waiting for the primary to become not ready", func() { Eventually(func(g Gomega) bool { - primaryPod, err := env.GetPod(namespace, primaryPod.Name) + primaryPod, err := pods.Get(env.Ctx, env.Client, namespace, primaryPod.Name) g.Expect(err).ToNot(HaveOccurred()) - return testsUtils.PodHasCondition(primaryPod, corev1.PodReady, corev1.ConditionFalse) + return pods.HasCondition(primaryPod, corev1.PodReady, corev1.ConditionFalse) }).WithTimeout(time.Minute).Should(BeTrue()) }) By("checking if the operator detects the issue", func() { Eventually(func(g Gomega) string { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) return cluster.Status.Phase }).WithTimeout(time.Minute).Should(Equal("Not enough disk space")) @@ -114,11 +124,11 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { primaryWALPVC := &corev1.PersistentVolumeClaim{} By("finding cluster resources", func() { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster).ToNot(BeNil()) - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) + primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(primaryPod).ToNot(BeNil()) @@ -159,19 +169,20 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { // We can't delete the Pod, as this will trigger // a failover. Eventually(func(g Gomega) bool { - primaryPod, err := env.GetPod(namespace, primaryPod.Name) + primaryPod, err := pods.Get(env.Ctx, env.Client, namespace, primaryPod.Name) g.Expect(err).ToNot(HaveOccurred()) - return testsUtils.PodHasCondition(primaryPod, corev1.PodReady, corev1.ConditionTrue) + return pods.HasCondition(primaryPod, corev1.PodReady, corev1.ConditionTrue) }).WithTimeout(10 * time.Minute).Should(BeTrue()) }) By("writing some WAL", func() { - query := "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT" - _, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + query := "CHECKPOINT; SELECT pg_catalog.pg_switch_wal(); CHECKPOINT" + _, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.DatabaseName("postgres"), + postgres.PostgresDBName, query) Expect(err).NotTo(HaveOccurred()) }) @@ -191,10 +202,10 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { func(sampleFile string) { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go index dae77e85c1..4cd2ff25a2 100644 --- a/tests/e2e/drain_node_test.go +++ b/tests/e2e/drain_node_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG 
Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( "fmt" + "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -26,8 +30,12 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -43,7 +51,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La if testLevelEnv.Depth < int(level) { Skip("Test depth is lower than the amount requested for this test") } - nodes, _ := env.GetNodeList() + nodes, _ := nodes.List(env.Ctx, env.Client) // We label three nodes where we could run the workloads, and ignore // the others. The pods of the clusters created in this test run only // where the drain label exists. @@ -51,7 +59,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La if (node.Spec.Unschedulable != true) && (len(node.Spec.Taints) == 0) { nodesWithLabels = append(nodesWithLabels, node.Name) cmd := fmt.Sprintf("kubectl label node %v drain=drain --overwrite", node.Name) - _, stderr, err := testsUtils.Run(cmd) + _, stderr, err := run.Run(cmd) Expect(stderr).To(BeEmpty()) Expect(err).ToNot(HaveOccurred()) } @@ -66,16 +74,90 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La AfterEach(func() { // Uncordon the cordoned nodes and remove the labels we added in the // BeforeEach section - err := nodes.UncordonAllNodes(env) + err := nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) for _, node := range nodesWithLabels { cmd := fmt.Sprintf("kubectl label node %v drain- ", node) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } nodesWithLabels = nil }) + Context("Default maintenance and pvc", func() { + const sampleFile = fixturesDir + "/drain-node/cluster-drain-node-karpenter.yaml.template" + const clusterName = "cluster-drain-node-karpenter" + + It("will remove the pod from a node tainted by karpenter", func() { + const namespacePrefix = "drain-node-e2e-karpenter-initiated" + + var namespace string + + By("creating the namespace and the cluster", func() { + var err error + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, sampleFile, env) + }) + + By("waiting for the jobs to be removed", func() { + timeout := 180 + Eventually(func() (int, error) { + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return
0, err + } + return len(podList.Items), err + }, timeout).Should(BeEquivalentTo(3)) + }) + + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "test", + } + + By("loading test data", func() { + AssertCreateTestData(env, tableLocator) + }) + + oldPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + By("adding a taint from karpenter to the node containing the primary", func() { + cmd := fmt.Sprintf("kubectl taint nodes %v karpenter.sh/disruption:NoSchedule", oldPrimary.Spec.NodeName) + _, _, err := run.Run(cmd) + Expect(err).ToNot(HaveOccurred()) + }) + + By("verifying failover after drain", func() { + timeout := 180 + Eventually(func() (string, error) { + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } + return pod.Name, err + }, timeout).ShouldNot(BeEquivalentTo(oldPrimary.Name)) + }) + + By("removing karpenter taint from node", func() { + cmd := fmt.Sprintf( + "kubectl taint nodes %v karpenter.sh/disruption=NoSchedule:NoSchedule-", + oldPrimary.Spec.NodeName, + ) + _, _, err := run.Run(cmd) + Expect(err).ToNot(HaveOccurred()) + }) + + By("data is present and standbys are streaming", func() { + AssertDataExpectedCount(env, tableLocator, 2) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) + }) + }) + }) + Context("Maintenance on, reuse pvc on", func() { // Initialize empty global namespace variable var namespace string @@ -97,13 +179,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // mark a node unschedulable so the pods will be distributed only on two nodes for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-2] { cmd := fmt.Sprintf("kubectl cordon %v", cordonNode) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } }) var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -111,22 +193,29 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Wait for jobs to be removed timeout := 180 Eventually(func() (int, error) { - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } return len(podList.Items), err }, timeout).Should(BeEquivalentTo(3)) }) // Load test data oldPrimary := clusterName + "-1" - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) // We create a mapping between the pod names and the UIDs of // their volumes. We do not expect the UIDs to change. // We take advantage of the fact that related PVCs and Pods have // the same name. 
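// The hunk below replaces env.GetClusterPodList with clusterutils.ListPods and
// then builds the pod-name to PVC-UID map described in the comment above. A
// minimal self-contained sketch of that mapping, assuming only a
// controller-runtime client; it relies on CloudNativePG naming each pod's PVC
// after the pod itself:
package pvcsketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// pvcUIDsByPod maps each pod name to the UID of the PVC that shares its name.
func pvcUIDsByPod(ctx context.Context, c client.Client, podList *corev1.PodList) (map[string]types.UID, error) {
	pvcUIDMap := make(map[string]types.UID, len(podList.Items))
	for _, pod := range podList.Items {
		pvc := &corev1.PersistentVolumeClaim{}
		key := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
		if err := c.Get(ctx, key, pvc); err != nil {
			return nil, err
		}
		// The UID is recorded before the drain; after uncordoning, the test
		// expects the recreated pods to reattach PVCs with the same UIDs.
		pvcUIDMap[pod.Name] = pvc.UID
	}
	return pvcUIDMap, nil
}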
- podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) pvcUIDMap := make(map[string]types.UID) for _, pod := range podList.Items { @@ -141,20 +230,26 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La } // Drain the node containing the primary pod and store the list of running pods - podsOnPrimaryNode := nodes.DrainPrimaryNode(namespace, clusterName, - testTimeouts[testsUtils.DrainNode], env) + podsOnPrimaryNode := nodes.DrainPrimary( + env.Ctx, env.Client, + namespace, clusterName, + testTimeouts[testsUtils.DrainNode], + ) By("verifying failover after drain", func() { timeout := 180 // Expect a failover to have happened Eventually(func() (string, error) { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } return pod.Name, err }, timeout).ShouldNot(BeEquivalentTo(oldPrimary)) }) By("uncordon nodes and check new pods use old pvcs", func() { - err := nodes.UncordonAllNodes(env) + err := nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) // Ensure evicted pods have restarted and are running. // one of them could have become the new primary. @@ -180,11 +275,8 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La } }) - // Expect the (previously created) test data to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertDataExpectedCount(env, tableLocator, 2) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) }) // Scenario: all the pods of a cluster are on a single node and another schedulable node exists. 
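// The recurring refactor in this patch swaps env-bound helpers such as
// env.GetNodeList and nodes.UncordonAllNodes(env) for functions that take an
// explicit context and client. A minimal sketch of an UncordonAll-style helper
// under that convention; this is an illustration only, the real implementation
// lives in tests/utils/nodes and may differ:
package nodesketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func UncordonAll(ctx context.Context, c client.Client) error {
	var nodeList corev1.NodeList
	if err := c.List(ctx, &nodeList); err != nil {
		return err
	}
	for i := range nodeList.Items {
		node := &nodeList.Items[i]
		if !node.Spec.Unschedulable {
			continue
		}
		// Clear the cordon flag with a merge patch against the original object.
		orig := node.DeepCopy()
		node.Spec.Unschedulable = false
		if err := c.Patch(ctx, node, client.MergeFrom(orig)); err != nil {
			return err
		}
	}
	return nil
}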
@@ -211,13 +303,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-1] { cordonNodes = append(cordonNodes, cordonNode) cmd := fmt.Sprintf("kubectl cordon %v", cordonNode) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } }) var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -225,22 +317,31 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Wait for jobs to be removed timeout := 180 Eventually(func() (int, error) { - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } return len(podList.Items), err }, timeout).Should(BeEquivalentTo(3)) }) // Load test data oldPrimary := clusterName + "-1" - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) // We create a mapping between the pod names and the UIDs of // their volumes. We do not expect the UIDs to change. // We take advantage of the fact that related PVCs and Pods have // the same name. - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + pvcUIDMap := make(map[string]types.UID) for _, pod := range podList.Items { pvcNamespacedName := types.NamespacedName{ @@ -257,19 +358,25 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // to move to. 
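// run.Run and run.Unchecked, which carry the kubectl cordon/drain calls in
// these tests, shell a command out and hand back stdout, stderr, and an error.
// A minimal sketch of such a wrapper, assuming nothing beyond the standard
// library:
package runsketch

import (
	"bytes"
	"os/exec"
)

func Run(command string) (stdout, stderr string, err error) {
	var outBuf, errBuf bytes.Buffer
	cmd := exec.Command("sh", "-c", command)
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf
	err = cmd.Run()
	return outBuf.String(), errBuf.String(), err
}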
By(fmt.Sprintf("uncordon one more node '%v'", cordonNodes[0]), func() { cmd := fmt.Sprintf("kubectl uncordon %v", cordonNodes[0]) - _, _, err = testsUtils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) // Drain the node containing the primary pod and store the list of running pods - podsOnPrimaryNode := nodes.DrainPrimaryNode(namespace, clusterName, - testTimeouts[testsUtils.DrainNode], env) + podsOnPrimaryNode := nodes.DrainPrimary( + env.Ctx, env.Client, + namespace, clusterName, + testTimeouts[testsUtils.DrainNode], + ) By("verifying failover after drain", func() { timeout := 180 // Expect a failover to have happened Eventually(func() (string, error) { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } return pod.Name, err }, timeout).ShouldNot(BeEquivalentTo(oldPrimary)) }) @@ -298,11 +405,8 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La } }) - // Expect the (previously created) test data to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertDataExpectedCount(env, tableLocator, 2) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) }) }) }) @@ -332,13 +436,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La By("leaving a single uncordoned", func() { for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-1] { cmd := fmt.Sprintf("kubectl cordon %v", cordonNode) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } }) var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -347,7 +451,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Wait for jobs to be removed timeout := 180 Eventually(func() (int, error) { - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } return len(podList.Items), err }, timeout).Should(BeEquivalentTo(3)) }) @@ -356,7 +463,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // not exist anymore after the drain var podsBeforeDrain []string By("retrieving the current pods' names", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podsBeforeDrain = append(podsBeforeDrain, pod.Name) @@ -364,20 +471,27 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) // Load test data - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) // We uncordon 
a cordoned node. New pods can go there. By("uncordon node for pod failover", func() { cmd := fmt.Sprintf("kubectl uncordon %v", nodesWithLabels[0]) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) // Drain the node containing the primary pod. Pods should be moved // to the node we've just uncordoned - nodes.DrainPrimaryNode(namespace, clusterName, testTimeouts[testsUtils.DrainNode], env) + nodes.DrainPrimary( + env.Ctx, env.Client, + namespace, clusterName, testTimeouts[testsUtils.DrainNode], + ) // Expect pods to be recreated and to be ready AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) @@ -387,7 +501,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La timeout := 600 Eventually(func(g Gomega) { matchingNames := 0 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { // compare the old pod list with the current pod names @@ -402,12 +516,9 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }, timeout).Should(Succeed()) }) - // Expect the (previously created) test data to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) - err = nodes.UncordonAllNodes(env) + AssertDataExpectedCount(env, tableLocator, 2) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) + err = nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) }) }) @@ -421,7 +532,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La BeforeAll(func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) }) @@ -429,49 +540,63 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La It("can drain the primary node and recover the cluster when uncordoned", func() { AssertCreateCluster(namespace, clusterName, sampleFile, env) + var drainedNodeName string By("waiting for the jobs to be removed", func() { // Wait for jobs to be removed timeout := 180 + var podList *corev1.PodList Eventually(func() (int, error) { - podList, err := env.GetPodList(namespace) + var err error + podList, err = pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } return len(podList.Items), err }, timeout).Should(BeEquivalentTo(1)) + drainedNodeName = podList.Items[0].Spec.NodeName }) // Load test data - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) // Drain the node containing the primary pod and store the list of running pods - _ = nodes.DrainPrimaryNode(namespace, clusterName, - testTimeouts[testsUtils.DrainNode], env) + _ = nodes.DrainPrimary( + env.Ctx, env.Client, + namespace, clusterName, + 
testTimeouts[testsUtils.DrainNode], + ) - By("verifying the primary is now pending", func() { - timeout := 180 - // Expect a failover to have happened - Eventually(func() (string, error) { - pod, err := env.GetPod(namespace, clusterName+"-1") - if err != nil { - return "", err - } - return string(pod.Status.Phase), err - }, timeout).Should(BeEquivalentTo("Pending")) + By("verifying the primary is now pending or somewhere else", func() { + Eventually(func(g Gomega) { + pod, err := pods.Get(env.Ctx, env.Client, namespace, clusterName+"-1") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pod).Should(SatisfyAny( + HaveField("Spec.NodeName", Not(BeEquivalentTo(drainedNodeName))), + HaveField("Status.Phase", BeEquivalentTo("Pending")), + )) + }).WithTimeout(180 * time.Second).WithPolling(PollingTime * time.Second).Should(Succeed()) }) By("uncordoning all nodes", func() { - err := nodes.UncordonAllNodes(env) + err := nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) }) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertDataExpectedCount(env, tableLocator, 2) }) }) When("the PDB is enabled", func() { It("prevents the primary node from being drained", func() { By("enabling PDB", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -483,7 +608,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La By("having the draining of the primary node rejected", func() { var primaryNode string Eventually(func(g Gomega) { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) primaryNode = pod.Spec.NodeName }, 60).Should(Succeed()) @@ -493,14 +618,14 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La cmd := fmt.Sprintf( "kubectl drain %v --ignore-daemonsets --delete-emptydir-data --force --timeout=%ds", primaryNode, 60) - _, stderr, err := testsUtils.RunUnchecked(cmd) + _, stderr, err := run.Unchecked(cmd) g.Expect(err).To(HaveOccurred()) g.Expect(stderr).To(ContainSubstring("Cannot evict pod as it would violate the pod's disruption budget")) }, 60).Should(Succeed()) }) By("uncordoning all nodes", func() { - err := nodes.UncordonAllNodes(env) + err := nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/tests/e2e/eviction_test.go b/tests/e2e/eviction_test.go index 077d3a1f56..59881d4055 100644 --- a/tests/e2e/eviction_test.go +++ b/tests/e2e/eviction_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -28,7 +31,11 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -59,7 +66,7 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { multiInstanceSampleFile = fixturesDir + "/eviction/multi-instance-cluster.yaml.template" ) - evictPod := func(podName string, namespace string, env *testsUtils.TestingEnvironment, timeoutSeconds uint) error { + evictPod := func(podName string, namespace string, env *environment.TestingEnvironment, timeoutSeconds uint) error { var pod corev1.Pod err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, @@ -119,18 +126,18 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { } const namespacePrefix = "single-instance-pod-eviction" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating a cluster", func() { // Create a cluster in a namespace we'll delete after the test - clusterName, err := env.GetResourceNameFromYAML(singleInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, singleInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, singleInstanceSampleFile, env) }) }) It("evicts the primary pod in single instance cluster", func() { - clusterName, err := env.GetResourceNameFromYAML(singleInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, singleInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) podName := clusterName + "-1" err = evictPod(podName, namespace, env, 60) @@ -152,7 +159,7 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { }) By("checking the cluster is healthy", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) }) }) }) @@ -176,20 +183,20 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { BeforeAll(func() { const namespacePrefix = "multi-instance-pod-eviction" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("Creating a cluster with multiple instances", func() { // Create a cluster in a namespace and shared in containers, we'll delete after the test - clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, multiInstanceSampleFile, env) }) By("retrieving the nodeName for primary pod", func() { var primaryPod *corev1.Pod - clusterName, err := 
env.GetResourceNameFromYAML(multiInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) + primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) taintNodeName = primaryPod.Spec.NodeName }) @@ -197,8 +204,9 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { AfterAll(func() { if needRemoveTaint { By("cleaning the taint on node", func() { - cmd := fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute-", taintNodeName) - _, _, err := testsUtils.Run(cmd) + cmd := fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute-", + taintNodeName) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) } @@ -207,12 +215,12 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { It("evicts the replica pod in multiple instance cluster", func() { var podName string - clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) // Find the standby pod By("getting standby pod to evict", func() { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items)).To(BeEquivalentTo(3)) for _, pod := range podList.Items { // Avoid parting non ready nodes, non active nodes, or primary nodes @@ -243,16 +251,16 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { }) By("checking the cluster is healthy", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) }) }) It("evicts the primary pod in multiple instance cluster", func() { var primaryPod *corev1.Pod - clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) + primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // We can not use patch to simulate the eviction of a primary pod; @@ -260,34 +268,36 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { By("taint the node to simulate pod been evicted", func() { cmd := fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute", taintNodeName) - _, _, err = testsUtils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) needRemoveTaint = true time.Sleep(3 * time.Second) cmd = fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute-", taintNodeName) - _, _, err = testsUtils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) needRemoveTaint = false }) By("checking switchover happens", func() { - Eventually(func() bool { - podList, err := env.GetClusterPodList(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() (bool, error) { + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return false, err + } for _, p := range 
podList.Items { if specs.IsPodPrimary(p) && primaryPod.GetName() != p.GetName() { - return true + return true, nil } } - return false + return false, nil }, 60).Should(BeTrue()) }) // Pod need rejoin, need more time By("checking the cluster is healthy", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) }) }) }) diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go index b3f6731aa1..a6a7e4055d 100644 --- a/tests/e2e/failover_test.go +++ b/tests/e2e/failover_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -27,7 +30,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -52,13 +60,14 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // We check that the currentPrimary is the -1 instance as expected, // and we define the targetPrimary (-3) and pausedReplica (-2). By("checking that CurrentPrimary and TargetPrimary are equal", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To( BeEquivalentTo(cluster.Status.TargetPrimary)) currentPrimary = cluster.Status.CurrentPrimary // Gather pod names - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).NotTo(HaveOccurred()) Expect(len(podList.Items), err).To(BeEquivalentTo(3)) for _, p := range podList.Items { pods = append(pods, p.Name) @@ -72,16 +81,23 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // In this way we know that this standby will lag behind when // we do some work on the primary. 
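// The walreceiver is frozen by SIGSTOP-ing its backend pid. A condensed sketch
// of the pid lookup the block below performs, reusing the call shape that
// exec.QueryInInstancePod has in the disk_space_test.go hunks of this patch:
out, _, err := exec.QueryInInstancePod(
	env.Ctx, env.Client, env.Interface, env.RestClientConfig,
	exec.PodLocator{Namespace: namespace, PodName: pausedReplica},
	postgres.PostgresDBName,
	"SELECT pid FROM pg_catalog.pg_stat_activity WHERE backend_type = 'walreceiver'")
Expect(err).ToNot(HaveOccurred())
pid := strings.TrimSpace(out)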
By("pausing the walreceiver on the 2nd node of the Cluster", func() { - primaryPod, err := env.GetPod(namespace, currentPrimary) + primaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) Expect(err).ToNot(HaveOccurred()) - pausedPod, err := env.GetPod(namespace, pausedReplica) + pausedPod, err := podutils.Get(env.Ctx, env.Client, namespace, pausedReplica) Expect(err).ToNot(HaveOccurred()) // Get the walreceiver pid - query := "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walreceiver'" - out, _, err := env.EventuallyExecCommand( - env.Ctx, *pausedPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", query) + query := "SELECT pid FROM pg_catalog.pg_stat_activity WHERE backend_type = 'walreceiver'" + out, _, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pausedPod.Namespace, + PodName: pausedPod.Name, + }, postgres.PostgresDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) pid = strings.Trim(out, "\n") @@ -92,52 +108,83 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // Terminate the pausedReplica walsender on the primary. // We don't want to wait for the replication timeout. - query = fmt.Sprintf("SELECT pg_terminate_backend(pid) FROM pg_stat_replication "+ + query = fmt.Sprintf("SELECT pg_catalog.pg_terminate_backend(pid) FROM pg_catalog.pg_stat_replication "+ "WHERE application_name = '%v'", pausedReplica) - _, _, err = env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", query) + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, postgres.PostgresDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) // Expect the primary to have lost connection with the stopped standby - Eventually(func() (int, error) { - primaryPod, err = env.GetPod(namespace, currentPrimary) - Expect(err).ToNot(HaveOccurred()) - return utils.CountReplicas(env, primaryPod) - }, RetryTimeout).Should(BeEquivalentTo(1)) + Eventually(func(g Gomega) { + primaryPod, err = podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(postgres.CountReplicas( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + primaryPod, RetryTimeout)).To(BeEquivalentTo(1)) + }, RetryTimeout).Should(Succeed()) }) // Perform a CHECKPOINT on the primary and wait for the working standby // to replicate at it By("generating some WAL traffic in the Cluster", func() { - primaryPod, err := env.GetPod(namespace, currentPrimary) + primaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) Expect(err).ToNot(HaveOccurred()) // Gather the current WAL LSN - initialLSN, _, err := env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "SELECT pg_current_wal_lsn()") + initialLSN, _, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, postgres.PostgresDBName, + "SELECT pg_catalog.pg_current_wal_lsn()", + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) // Execute a checkpoint - _, _, 
err = env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "CHECKPOINT") + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, postgres.PostgresDBName, + "CHECKPOINT", + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) + query := fmt.Sprintf("SELECT true FROM pg_catalog.pg_stat_replication "+ + "WHERE application_name = '%v' AND replay_lsn > '%v'", + targetPrimary, strings.Trim(initialLSN, "\n")) // The replay_lsn of the targetPrimary should be ahead // of the one before the checkpoint - Eventually(func() (string, error) { - primaryPod, err = env.GetPod(namespace, currentPrimary) - Expect(err).ToNot(HaveOccurred()) - query := fmt.Sprintf("SELECT true FROM pg_stat_replication "+ - "WHERE application_name = '%v' AND replay_lsn > '%v'", - targetPrimary, strings.Trim(initialLSN, "\n")) - out, _, err := env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", query) - return strings.TrimSpace(out), err - }, RetryTimeout).Should(BeEquivalentTo("t")) + Eventually(func(g Gomega) { + primaryPod, err = podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) + g.Expect(err).ToNot(HaveOccurred()) + out, _, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, postgres.PostgresDBName, + query, + RetryTimeout, + PollingTime, + ) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.TrimSpace(out)).To(BeEquivalentTo("t")) + }, RetryTimeout).Should(Succeed()) }) // Force-delete the primary. Eventually the cluster should elect a @@ -147,18 +194,18 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err := env.DeletePod(namespace, currentPrimary, quickDelete) + err := podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary, quickDelete) Expect(err).ToNot(HaveOccurred()) // We wait until the operator knows that the primary is dead. // At this point the promotion is waiting for all the walreceivers // to be disconnected. We can send the SIGCONT now. 
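// Resuming the paused walreceiver later amounts to a plain SIGCONT to the
// saved pid. A minimal sketch, assuming exec.CommandInInstancePod accepts a
// timeout plus an argv in the same style as the exec helpers shown elsewhere
// in this patch (the exact signature is not visible in these hunks):
commandTimeout := time.Second * 10
_, _, err = exec.CommandInInstancePod(
	env.Ctx, env.Client, env.Interface, env.RestClientConfig,
	exec.PodLocator{Namespace: namespace, PodName: pausedReplica},
	&commandTimeout,
	"sh", "-c", fmt.Sprintf("kill -CONT %v", pid))
Expect(err).ToNot(HaveOccurred())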
Eventually(func() (int, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.ReadyInstances, err }, RetryTimeout).Should(BeEquivalentTo(2)) - pausedPod, err := env.GetPod(namespace, pausedReplica) + pausedPod, err := podutils.Get(env.Ctx, env.Client, namespace, pausedReplica) Expect(err).ToNot(HaveOccurred()) // Send the SIGCONT to the walreceiver PID to resume execution @@ -170,22 +217,22 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { By("making sure that the operator is enforcing the switchover delay") timeout := 120 Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimaryFailingSinceTimestamp, err }, timeout).Should(Not(Equal(""))) } - By("making sure that the the targetPrimary has switched away from current primary") + By("making sure that the targetPrimary has switched away from current primary") // The operator should eventually set the cluster target primary to // the instance we expect to take that role (-3). Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.TargetPrimary, err - }, testTimeouts[utils.NewTargetOnFailover]). + }, testTimeouts[timeouts.NewTargetOnFailover]). ShouldNot( Or(BeEquivalentTo(currentPrimary), BeEquivalentTo(apiv1.PendingFailoverMarker))) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.TargetPrimary, err).To( BeEquivalentTo(targetPrimary)) }) @@ -194,9 +241,9 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // operator to the target primary By("waiting for the TargetPrimary to become CurrentPrimary", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err - }, testTimeouts[utils.NewPrimaryAfterFailover]).Should(BeEquivalentTo(targetPrimary)) + }, testTimeouts[timeouts.NewPrimaryAfterFailover]).Should(BeEquivalentTo(targetPrimary)) }) } @@ -214,9 +261,9 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { ) var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -230,10 +277,10 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { namespacePrefix = "failover-e2e-delay" ) var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) 
AssertCreateCluster(namespace, clusterName, sampleFile, env) diff --git a/tests/e2e/fastfailover_test.go b/tests/e2e/fastfailover_test.go index debccca369..4de03448a0 100644 --- a/tests/e2e/fastfailover_test.go +++ b/tests/e2e/fastfailover_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -45,14 +48,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La Skip("Test depth is lower than the amount requested for this test") } - // The walreceiver of a standby that wasn't promoted may try to reconnect - // before the rw service endpoints are updated. In this case, the walreceiver - // can be stuck for waiting for the connection to be established for a time that - // depends on the tcp_syn_retries sysctl. Since by default - // net.ipv4.tcp_syn_retries=6, PostgreSQL can wait 2^7-1=127 seconds before - // restarting the walreceiver. if !IsLocal() { - maxReattachTime = 180 maxFailoverTime = 30 } }) @@ -68,7 +64,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La clusterName = "cluster-fast-failover" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertFastFailOver(namespace, sampleFileWithoutReplicationSlots, clusterName, webTestFile, webTestJob, maxReattachTime, maxFailoverTime) @@ -86,7 +82,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La clusterName = "cluster-fast-failover" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertFastFailOver(namespace, sampleFileWithReplicationSlots, clusterName, webTestFile, webTestJob, maxReattachTime, maxFailoverTime) @@ -100,7 +96,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La clusterName = "cluster-syncreplicas-fast-failover" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertFastFailOver( namespace, sampleFileSyncReplicas, clusterName, webTestSyncReplicas, webTestJob, maxReattachTime, maxFailoverTime) diff --git a/tests/e2e/fastswitchover_test.go b/tests/e2e/fastswitchover_test.go index 86aac345ca..6a7909eb6a 100644 --- a/tests/e2e/fastswitchover_test.go +++ b/tests/e2e/fastswitchover_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -19,16 +22,22 @@ package e2e import ( "fmt" "strings" - "time" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -62,7 +71,7 @@ var _ = Describe("Fast switchover", Serial, Label(tests.LabelPerformance, tests. // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "primary-switchover-time" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) assertFastSwitchover(namespace, sampleFileWithoutReplicationSlots, clusterName, webTestFile, webTestJob) }) @@ -72,7 +81,7 @@ var _ = Describe("Fast switchover", Serial, Label(tests.LabelPerformance, tests. // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "primary-switchover-time-with-slots" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) assertFastSwitchover(namespace, sampleFileWithReplicationSlots, clusterName, webTestFile, webTestJob) AssertClusterHAReplicationSlots(namespace, clusterName) @@ -101,29 +110,20 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe CreateResourceFromFile(namespace, sampleFile) }) By("having a Cluster with three instances ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) // Node 1 should be the primary, so the -rw service should // point there. We verify this. 
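// The -rw lookup below switches from the deprecated core/v1 Endpoints API to
// discovery/v1 EndpointSlices. A minimal sketch of the two helpers involved,
// assuming slices are matched through the standard kubernetes.io/service-name
// label; the real helpers live in tests/utils and may differ:
package slicesketch

import (
	"context"

	discoveryv1 "k8s.io/api/discovery/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func getEndpointSliceByServiceName(
	ctx context.Context, c client.Client, namespace, serviceName string,
) (*discoveryv1.EndpointSlice, error) {
	var slices discoveryv1.EndpointSliceList
	if err := c.List(ctx, &slices,
		client.InNamespace(namespace),
		client.MatchingLabels{discoveryv1.LabelServiceName: serviceName},
	); err != nil {
		return nil, err
	}
	if len(slices.Items) == 0 {
		return nil, nil
	}
	return &slices.Items[0], nil
}

// firstEndpointSliceIP returns the first address advertised by the slice.
func firstEndpointSliceIP(slice *discoveryv1.EndpointSlice) string {
	if slice == nil || len(slice.Endpoints) == 0 || len(slice.Endpoints[0].Addresses) == 0 {
		return ""
	}
	return slice.Endpoints[0].Addresses[0]
}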
By("having the current primary on node1", func() { - endpointName := clusterName + "-rw" - endpoint := &corev1.Endpoints{} - endpointNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: endpointName, - } + rwServiceName := clusterName + "-rw" + endpointSlice, err := utils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, rwServiceName) + Expect(err).ToNot(HaveOccurred()) + oldPrimary = clusterName + "-1" pod := &corev1.Pod{} - podNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: oldPrimary, - } - err := env.Client.Get(env.Ctx, endpointNamespacedName, - endpoint) + err = env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: oldPrimary}, pod) Expect(err).ToNot(HaveOccurred()) - err = env.Client.Get(env.Ctx, podNamespacedName, pod) - Expect(utils.FirstEndpointIP(endpoint), err).To( - BeEquivalentTo(pod.Status.PodIP)) + Expect(utils.FirstEndpointSliceIP(endpointSlice)).To(BeEquivalentTo(pod.Status.PodIP)) }) By("preparing the db for the test scenario", func() { // Create the table used by the scenario @@ -137,17 +137,10 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe ", PRIMARY KEY (id)" + ")" - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - _, _, err = env.ExecCommandWithPsqlClient( - namespace, - clusterName, - primaryPod, - apiv1.ApplicationUserSecretSuffix, - utils.AppDBName, - query, - ) + _, err := postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) @@ -157,36 +150,44 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe // on the postgres primary. We make sure that the first // records appear on the database before moving to the next // step. 
- _, _, err := utils.Run("kubectl create -n " + namespace + + _, _, err := run.Run("kubectl create -n " + namespace + " -f " + webTestFile) Expect(err).ToNot(HaveOccurred()) - _, _, err = utils.Run("kubectl create -n " + namespace + + + webtestDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "webtest", Namespace: namespace}} + Expect(deployments.WaitForReady(env.Ctx, env.Client, webtestDeploy, 60)).To(Succeed()) + + _, _, err = run.Run("kubectl create -n " + namespace + " -f " + webTestJob) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 - timeout := 60 primaryPodNamespacedName := types.NamespacedName{ Namespace: namespace, Name: oldPrimary, } + query := "SELECT count(*) > 0 FROM tps.tl" Eventually(func() (string, error) { primaryPod := &corev1.Pod{} err := env.Client.Get(env.Ctx, primaryPodNamespacedName, primaryPod) if err != nil { return "", err } - out, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", - "SELECT count(*) > 0 FROM tps.tl") + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.AppDBName, + query) return strings.TrimSpace(out), err - }, timeout).Should(BeEquivalentTo("t")) + }, RetryTimeout).Should(BeEquivalentTo("t")) }) By("setting the TargetPrimary to node2 to trigger a switchover", func() { targetPrimary = clusterName + "-2" err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Status.TargetPrimary = targetPrimary return env.Client.Status().Update(env.Ctx, cluster) @@ -197,16 +198,6 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe var maxReattachTime int32 = 60 var maxSwitchoverTime int32 = 20 - // The walreceiver of a standby that wasn't promoted may try to reconnect - // before the rw service endpoints are updated. In this case, the walreceiver - // can be stuck for waiting for the connection to be established for a time that - // depends on the tcp_syn_retries sysctl. Since by default - // net.ipv4.tcp_syn_retries=6, PostgreSQL can wait 2^7-1=127 seconds before - // restarting the walreceiver. - if !IsLocal() { - maxReattachTime = 180 - } - AssertStandbysFollowPromotion(namespace, clusterName, maxReattachTime) AssertWritesResumedBeforeTimeout(namespace, clusterName, maxSwitchoverTime) diff --git a/tests/e2e/fencing_test.go b/tests/e2e/fencing_test.go index d160cbb583..407801f9c7 100644 --- a/tests/e2e/fencing_test.go +++ b/tests/e2e/fencing_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -29,7 +32,13 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -70,7 +79,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { } checkInstanceIsStreaming := func(instanceName, namespace string) { - timeout := time.Second * 10 + query := "SELECT count(*) FROM pg_catalog.pg_stat_wal_receiver" Eventually(func() (int, error) { err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: instanceName}, @@ -78,8 +87,14 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { if err != nil { return 0, err } - out, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &timeout, - "psql", "-U", "postgres", "-tAc", "SELECT count(*) FROM pg_stat_wal_receiver") + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + query) if err != nil { return 0, err } @@ -89,23 +104,23 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { } checkPostgresConnection := func(podName, namespace string) { - err := testUtils.GetObject(env, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, &pod) + err := objects.Get(env.Ctx, env.Client, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, &pod) Expect(err).ToNot(HaveOccurred()) timeout := time.Second * 10 dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require", - testUtils.PGLocalSocketDir, "postgres", "postgres", "") + postgres.PGLocalSocketDir, "postgres", "postgres", "") stdOut, stdErr, err := utils.ExecCommand(env.Ctx, env.Interface, env.RestClientConfig, pod, specs.PostgresContainerName, &timeout, "psql", dsn, "-tAc", "SELECT 1") Expect(err).To(HaveOccurred(), stdErr, stdOut) } - checkFencingAnnotationSet := func(fencingMethod testUtils.FencingMethod, content []string) { - if fencingMethod != testUtils.UsingAnnotation { + checkFencingAnnotationSet := func(fencingMethod fencing.Method, content []string) { + if fencingMethod != fencing.UsingAnnotation { return } By("checking the cluster has the expected annotation set", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) if len(content) == 0 { Expect(cluster.Annotations).To(Or(Not(HaveKey(utils.FencedInstanceAnnotation)), @@ -119,19 +134,20 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { }) } - assertFencingPrimaryWorks := func(fencingMethod testUtils.FencingMethod) { + assertFencingPrimaryWorks := func(fencingMethod fencing.Method) { It("can fence a primary instance", func() { var beforeFencingPodName string By("fencing the primary instance", func() { - primaryPod, err := 
env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) beforeFencingPodName = primaryPod.GetName() - Expect(testUtils.FencingOn(env, beforeFencingPodName, + Expect(fencing.On(env.Ctx, env.Client, beforeFencingPodName, namespace, clusterName, fencingMethod)).Should(Succeed()) }) By("check the instance is not ready, but kept as primary instance", func() { checkInstanceStatusReadyOrNot(beforeFencingPodName, namespace, false) - currentPrimaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterName) Expect(err).ToNot(HaveOccurred()) Expect(beforeFencingPodName).To(Equal(currentPrimaryPodInfo.GetName())) }) @@ -141,29 +157,30 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { checkPostgresConnection(beforeFencingPodName, namespace) }) By("lift the fencing", func() { - Expect(testUtils.FencingOff(env, beforeFencingPodName, + Expect(fencing.Off(env.Ctx, env.Client, beforeFencingPodName, namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) }) By("the old primary becomes ready", func() { checkInstanceStatusReadyOrNot(beforeFencingPodName, namespace, true) }) By("the old primary should still be the primary instance", func() { - currentPrimaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterName) Expect(err).ToNot(HaveOccurred()) Expect(beforeFencingPodName).Should(BeEquivalentTo(currentPrimaryPodInfo.GetName())) }) By("all followers should be streaming again from the primary instance", func() { - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) }) checkFencingAnnotationSet(fencingMethod, nil) }) } - assertFencingFollowerWorks := func(fencingMethod testUtils.FencingMethod) { + assertFencingFollowerWorks := func(fencingMethod fencing.Method) { It("can fence a follower instance", func() { var beforeFencingPodName string - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) By("fence a follower instance", func() { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items)).To(BeEquivalentTo(3)) for _, pod := range podList.Items { if specs.IsPodStandby(pod) { @@ -172,7 +189,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { } } Expect(beforeFencingPodName).ToNot(BeEmpty()) - Expect(testUtils.FencingOn(env, beforeFencingPodName, + Expect(fencing.On(env.Ctx, env.Client, beforeFencingPodName, namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) }) checkFencingAnnotationSet(fencingMethod, []string{beforeFencingPodName}) @@ -184,7 +201,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { checkPostgresConnection(beforeFencingPodName, namespace) }) By("lift the fencing", func() { - Expect(testUtils.FencingOff(env, beforeFencingPodName, + Expect(fencing.Off(env.Ctx, env.Client, beforeFencingPodName, namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) }) By("the instance becomes ready", func() { @@ -196,41 +213,43 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { 
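	// Note on the two fencing.Method values exercised in these specs:
	// fencing.UsingPlugin drives `kubectl cnpg fencing on/off`, while
	// fencing.UsingAnnotation edits the cluster annotation asserted above via
	// utils.FencedInstanceAnnotation. The annotation value is a JSON list of
	// instance names, with ["*"] meaning the whole cluster. An illustrative
	// invocation (the cluster and instance names are hypothetical):
	//
	//	kubectl annotate cluster cluster-example --overwrite \
	//	    cnpg.io/fencedInstances='["cluster-example-1"]'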
checkFencingAnnotationSet(fencingMethod, nil) }) } - assertFencingClusterWorks := func(fencingMethod testUtils.FencingMethod) { + assertFencingClusterWorks := func(fencingMethod fencing.Method) { It("can fence all the instances in a cluster", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primaryPodName := primaryPod.GetName() By("fence the whole cluster using \"(*)\"", func() { - Expect(testUtils.FencingOn(env, "*", namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) + Expect(fencing.On(env.Ctx, env.Client, "*", namespace, clusterName, + fencingMethod)).ToNot(HaveOccurred()) }) checkFencingAnnotationSet(fencingMethod, []string{"*"}) By("check all instances are not ready", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) for _, pod := range podList.Items { checkInstanceStatusReadyOrNot(pod.GetName(), namespace, false) } }) By("check postgres connection on all instances", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) for _, pod := range podList.Items { checkPostgresConnection(pod.GetName(), namespace) } }) By("lift the fencing", func() { - Expect(testUtils.FencingOff(env, "*", namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) + Expect(fencing.Off(env.Ctx, env.Client, "*", namespace, clusterName, + fencingMethod)).ToNot(HaveOccurred()) }) By("all instances become ready", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) for _, pod := range podList.Items { checkInstanceStatusReadyOrNot(pod.GetName(), namespace, true) } }) By("the old primary is still the primary instance", func() { - podName, err := env.GetClusterPrimary(namespace, clusterName) + podName, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(primaryPodName).Should(BeEquivalentTo(podName.GetName())) }) @@ -245,33 +264,33 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { var err error BeforeAll(func() { const namespacePrefix = "fencing-using-plugin" - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) - assertFencingPrimaryWorks(testUtils.UsingPlugin) - assertFencingFollowerWorks(testUtils.UsingPlugin) - assertFencingClusterWorks(testUtils.UsingPlugin) + assertFencingPrimaryWorks(fencing.UsingPlugin) + assertFencingFollowerWorks(fencing.UsingPlugin) + assertFencingClusterWorks(fencing.UsingPlugin) }) Context("using annotation", Ordered, func() { var err error BeforeAll(func() { const namespacePrefix = "fencing-using-annotation" - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = 
yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) - assertFencingPrimaryWorks(testUtils.UsingAnnotation) - assertFencingFollowerWorks(testUtils.UsingAnnotation) - assertFencingClusterWorks(testUtils.UsingAnnotation) + assertFencingPrimaryWorks(fencing.UsingAnnotation) + assertFencingFollowerWorks(fencing.UsingAnnotation) + assertFencingClusterWorks(fencing.UsingAnnotation) }) }) diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-02.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-02.yaml index d59a7a38e3..be4373f4ad 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-02.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-02.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-02 spec: cluster: name: source-cluster-azure diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr-sas.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr-sas.yaml index 240b9dd72e..6c6a213162 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr-sas.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr-sas.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-pitr-sas spec: cluster: name: pg-backup-azure-blob-sas diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml index d856776eaa..16f042b3d5 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-pitr spec: cluster: - name: external-cluster-azure + name: source-cluster-azure diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-sas.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-sas.yaml index 240b9dd72e..2b10ab726f 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-sas.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-sas.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-sas spec: cluster: name: pg-backup-azure-blob-sas diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azurite-02.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azurite-02.yaml index b05e52e1fc..8838291d02 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azurite-02.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azurite-02.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-azurite-02 spec: cluster: name: pg-backup-azurite diff --git 
a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-minio-02.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-minio-02.yaml index 0cf59f9ef1..c4226fc92b 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-minio-02.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-minio-02.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-02 spec: cluster: name: source-cluster-minio diff --git a/tests/e2e/fixtures/configmap-support/configmap.yaml b/tests/e2e/fixtures/configmap-support/configmap.yaml index 99d5f9d09f..13bc3f3fd1 100644 --- a/tests/e2e/fixtures/configmap-support/configmap.yaml +++ b/tests/e2e/fixtures/configmap-support/configmap.yaml @@ -4,6 +4,8 @@ data: # wrong example2.com on purpose to check overwriting via secret works fine INHERITED_LABELS: environment, example2.com/* MONITORING_QUERIES_CONFIGMAP: "" + CLUSTERS_ROLLOUT_DELAY: '1' + INSTANCES_ROLLOUT_DELAY: '1' kind: ConfigMap metadata: name: cnpg-controller-manager-config diff --git a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template new file mode 100644 index 0000000000..ec59c21a42 --- /dev/null +++ b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template @@ -0,0 +1,33 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: db-declarative-delete +spec: + name: declarative + owner: app + localeCType: C + localeCollate: C + encoding: UTF8 + databaseReclaimPolicy: delete + cluster: + name: cluster-with-declarative-databases + extensions: + - name: bloom + ensure: present + - name: postgres_fdw + ensure: present + schemas: + - name: test_schema + ensure: present + fdws: + - name: postgres_fdw + ensure: present + usage: + - name: app + type: grant + - name: mywrapper + options: + - name: debug + value: 'true' + ensure: present + ensure: present diff --git a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template index afa83d0ccd..1d67109a2f 100644 --- a/tests/e2e/fixtures/declarative_databases/database.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template @@ -5,5 +5,29 @@ metadata: spec: name: declarative owner: app + localeCType: C + localeCollate: C + encoding: SQL_ASCII + template: template0 cluster: name: cluster-with-declarative-databases + extensions: + - name: bloom + ensure: present + - name: postgres_fdw + ensure: present + schemas: + - name: test_schema + ensure: present + fdws: + - name: postgres_fdw + ensure: present + usage: + - name: app + type: grant + - name: mywrapper + options: + - name: debug + value: 'true' + ensure: present + ensure: present diff --git a/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template b/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template new file mode 100644 index 0000000000..1597981714 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template @@ -0,0 +1,48 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: destination-cluster +spec: + instances: 1 + externalClusters: + - name: source-cluster + connectionParameters: + host: source-cluster-rw + user: app + dbname: declarative + port: "5432" + password: + name: source-cluster-app + key: password + + postgresql: + parameters: + 
max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + # Example of rolling update strategy: + # - unsupervised: automated update of the primary once all + # replicas have been upgraded (default) + # - supervised: requires manual supervision to perform + # the switchover of the primary + primaryUpdateStrategy: unsupervised + primaryUpdateMethod: switchover + + bootstrap: + initdb: + database: app + owner: app + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml new file mode 100644 index 0000000000..d4deace971 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: destination-db-declarative +spec: + name: declarative + owner: app + databaseReclaimPolicy: delete + cluster: + name: destination-cluster diff --git a/tests/e2e/fixtures/declarative_pub_sub/pub.yaml b/tests/e2e/fixtures/declarative_pub_sub/pub.yaml new file mode 100644 index 0000000000..bd09d64014 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/pub.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: publication-declarative +spec: + name: pub + dbname: declarative + cluster: + name: source-cluster + target: + allTables: true diff --git a/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template b/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template new file mode 100644 index 0000000000..398a6613c8 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template @@ -0,0 +1,48 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: source-cluster +spec: + instances: 1 + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + pg_hba: + - hostssl replication app all scram-sha-256 + + managed: + roles: + - name: app + ensure: present + login: true + replication: true + + + # Example of rolling update strategy: + # - unsupervised: automated update of the primary once all + # replicas have been upgraded (default) + # - supervised: requires manual supervision to perform + # the switchover of the primary + primaryUpdateStrategy: unsupervised + primaryUpdateMethod: switchover + + bootstrap: + initdb: + database: app + owner: app + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml new file mode 100644 index 0000000000..4ebcae63ee --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: source-db-declarative +spec: + name: declarative + owner: app + databaseReclaimPolicy: delete + cluster: + name: source-cluster diff 
--git a/tests/e2e/fixtures/declarative_pub_sub/sub.yaml b/tests/e2e/fixtures/declarative_pub_sub/sub.yaml new file mode 100644 index 0000000000..8eb5aabdc4 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/sub.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: subscription-declarative +spec: + name: sub + dbname: declarative + publicationName: pub + cluster: + name: destination-cluster + externalClusterName: source-cluster diff --git a/tests/e2e/fixtures/drain-node/cluster-drain-node-karpenter.yaml.template b/tests/e2e/fixtures/drain-node/cluster-drain-node-karpenter.yaml.template new file mode 100644 index 0000000000..e268530882 --- /dev/null +++ b/tests/e2e/fixtures/drain-node/cluster-drain-node-karpenter.yaml.template @@ -0,0 +1,33 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-drain-node-karpenter +spec: + instances: 3 + + affinity: + nodeSelector: + drain: 'drain' + + postgresql: + parameters: + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + wal_receiver_timeout: '2s' + + bootstrap: + initdb: + database: app + owner: appuser + + storage: + size: 1Gi + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/fastfailover/apache-benchmark-webtest.yaml b/tests/e2e/fixtures/fastfailover/apache-benchmark-webtest.yaml index 8af0fe341e..347e76dc01 100644 --- a/tests/e2e/fixtures/fastfailover/apache-benchmark-webtest.yaml +++ b/tests/e2e/fixtures/fastfailover/apache-benchmark-webtest.yaml @@ -14,5 +14,9 @@ spec: - "120" - "-c" - "5" + - "-v" + - "2" + - "-s" + - "3" - "http://webtest:8080/tx" restartPolicy: Never diff --git a/tests/e2e/fixtures/fastfailover/webtest-syncreplicas.yaml b/tests/e2e/fixtures/fastfailover/webtest-syncreplicas.yaml index 8de28f9e4e..541f9bf81d 100644 --- a/tests/e2e/fixtures/fastfailover/webtest-syncreplicas.yaml +++ b/tests/e2e/fixtures/fastfailover/webtest-syncreplicas.yaml @@ -16,7 +16,7 @@ spec: app: webtest spec: containers: - - image: ghcr.io/cloudnative-pg/webtest:1.6.0 + - image: ghcr.io/cloudnative-pg/webtest:1.7.0 name: webtest env: - name: PASSWORD @@ -33,6 +33,12 @@ spec: value: "postgres://$(USER):$(PASSWORD)@cluster-syncreplicas-fast-failover-rw/app?sslmode=require&connect_timeout=2" - name: SQL_QUERY value: "insert into tps.tl(source) values ('hey');" + livenessProbe: + tcpSocket: + port: 8080 + readinessProbe: + tcpSocket: + port: 8080 ports: - containerPort: 8080 --- diff --git a/tests/e2e/fixtures/fastfailover/webtest.yaml b/tests/e2e/fixtures/fastfailover/webtest.yaml index 3865c4dffd..e3464cfa10 100644 --- a/tests/e2e/fixtures/fastfailover/webtest.yaml +++ b/tests/e2e/fixtures/fastfailover/webtest.yaml @@ -16,7 +16,7 @@ spec: app: webtest spec: containers: - - image: ghcr.io/cloudnative-pg/webtest:1.6.0 + - image: ghcr.io/cloudnative-pg/webtest:1.7.0 name: webtest env: - name: PASSWORD @@ -33,6 +33,12 @@ spec: value: "postgres://$(USER):$(PASSWORD)@cluster-fast-failover-rw/app?sslmode=require&connect_timeout=2" - name: SQL_QUERY value: "insert into tps.tl(source) values ('hey');" + livenessProbe: + tcpSocket: + port: 8080 + readinessProbe: + tcpSocket: + port: 8080 ports: - containerPort: 8080 --- diff --git a/tests/e2e/fixtures/fastswitchover/apache-benchmark-webtest.yaml 
b/tests/e2e/fixtures/fastswitchover/apache-benchmark-webtest.yaml index 8af0fe341e..82192f24d1 100644 --- a/tests/e2e/fixtures/fastswitchover/apache-benchmark-webtest.yaml +++ b/tests/e2e/fixtures/fastswitchover/apache-benchmark-webtest.yaml @@ -9,10 +9,14 @@ spec: - name: apache-benchmark image: httpd command: - - "/usr/local/apache2/bin/ab" - - "-t" - - "120" - - "-c" - - "5" - - "http://webtest:8080/tx" + - "/usr/local/apache2/bin/ab" + - "-t" + - "120" + - "-c" + - "5" + - "-v" + - "2" + - "-s" + - "3" + - "http://webtest:8080/tx" restartPolicy: Never diff --git a/tests/e2e/fixtures/fastswitchover/webtest.yaml b/tests/e2e/fixtures/fastswitchover/webtest.yaml index f94effbaa8..8ef5df717f 100644 --- a/tests/e2e/fixtures/fastswitchover/webtest.yaml +++ b/tests/e2e/fixtures/fastswitchover/webtest.yaml @@ -16,7 +16,7 @@ spec: app: webtest spec: containers: - - image: ghcr.io/cloudnative-pg/webtest:1.6.0 + - image: ghcr.io/cloudnative-pg/webtest:1.7.0 name: webtest env: - name: PASSWORD @@ -33,6 +33,12 @@ spec: value: "postgres://$(USER):$(PASSWORD)@cluster-fast-switchover-rw/app?sslmode=require&connect_timeout=2" - name: SQL_QUERY value: "insert into tps.tl(source) values ('hey');" + livenessProbe: + tcpSocket: + port: 8080 + readinessProbe: + tcpSocket: + port: 8080 ports: - containerPort: 8080 --- diff --git a/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template b/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template new file mode 100644 index 0000000000..5db523de6e --- /dev/null +++ b/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template @@ -0,0 +1,36 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postgresql-with-extensions +spec: + instances: 3 + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + extensions: + - name: postgis + ld_library_path: + - syslib + image: + reference: ghcr.io/niccolofei/postgis:18beta2-master-bullseye # wokeignore:rule=master + + bootstrap: + initdb: + database: app + owner: app + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/imagevolume_extensions/database.yaml.template b/tests/e2e/fixtures/imagevolume_extensions/database.yaml.template new file mode 100644 index 0000000000..af3929cb07 --- /dev/null +++ b/tests/e2e/fixtures/imagevolume_extensions/database.yaml.template @@ -0,0 +1,26 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: app +spec: + name: app + owner: app + cluster: + name: postgresql-with-extensions + extensions: + - name: postgis + ensure: present + - name: postgis_raster + ensure: present + - name: postgis_sfcgal + ensure: present + - name: fuzzystrmatch + ensure: present + - name: address_standardizer + ensure: present + - name: address_standardizer_data_us + ensure: present + - name: postgis_tiger_geocoder + ensure: present + - name: postgis_topology + ensure: present diff --git a/tests/e2e/fixtures/initdb/cluster-custom-locale.yaml.template b/tests/e2e/fixtures/initdb/cluster-custom-locale.yaml.template index 73e2206fd3..e402a2fb9d 100644 --- a/tests/e2e/fixtures/initdb/cluster-custom-locale.yaml.template +++ 
b/tests/e2e/fixtures/initdb/cluster-custom-locale.yaml.template @@ -25,7 +25,7 @@ spec: owner: app options: - "--locale" - - "en_US.utf8" + - "C" # Persistent storage configuration storage: diff --git a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-no-slots.yaml.template b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-no-slots.yaml.template index c38a36ff0d..d42e607616 100644 --- a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-no-slots.yaml.template +++ b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-no-slots.yaml.template @@ -27,6 +27,10 @@ spec: enabled: false slotPrefix: _cnpg_ + probes: + readiness: + type: query + # Persistent storage configuration storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-roles.yaml.template b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-roles.yaml.template index 29c869a33b..d2cffe85f4 100644 --- a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-roles.yaml.template +++ b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-roles.yaml.template @@ -36,6 +36,10 @@ spec: database: app owner: app + probes: + readiness: + type: query + # Persistent storage configuration storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption.yaml.template b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption.yaml.template index f46f729c85..9c419300a0 100644 --- a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption.yaml.template +++ b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption.yaml.template @@ -21,6 +21,10 @@ spec: database: app owner: app + probes: + readiness: + type: query + # Persistent storage configuration storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-disabled.yaml.template b/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-disabled.yaml.template new file mode 100644 index 0000000000..daa6861370 --- /dev/null +++ b/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-disabled.yaml.template @@ -0,0 +1,27 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postgresql-liveness-pinger-disabled + annotations: + alpha.cnpg.io/livenessPinger: '{"enabled": false}' +spec: + instances: 3 + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-enabled.yaml.template b/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-enabled.yaml.template new file mode 100644 index 0000000000..37654455a5 --- /dev/null +++ b/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-enabled.yaml.template @@ -0,0 +1,27 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postgresql-liveness-pinger-enabled + annotations: + alpha.cnpg.io/livenessPinger: '{"enabled": true}' +spec: + instances: 3 + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: 
'1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/sync_replicas/cluster-pgstatstatements.yaml.template b/tests/e2e/fixtures/sync_replicas/cluster-pgstatstatements.yaml.template index d773df1b11..20cbcd7bf3 100644 --- a/tests/e2e/fixtures/sync_replicas/cluster-pgstatstatements.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/cluster-pgstatstatements.yaml.template @@ -18,6 +18,10 @@ spec: log_autovacuum_min_duration: '1s' 'pg_stat_statements.max': '10000' log_replication_commands: 'on' + probes: + readiness: + failureThreshold: 10 + periodSeconds: 1 storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/sync_replicas/cluster-sync-replica-legacy.yaml.template b/tests/e2e/fixtures/sync_replicas/cluster-sync-replica-legacy.yaml.template index 5a9dc71f7c..973e8ef640 100644 --- a/tests/e2e/fixtures/sync_replicas/cluster-sync-replica-legacy.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/cluster-sync-replica-legacy.yaml.template @@ -17,6 +17,10 @@ spec: log_temp_files: '1024' log_autovacuum_min_duration: '1s' log_replication_commands: 'on' + probes: + readiness: + failureThreshold: 10 + periodSeconds: 1 storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/sync_replicas/cluster-sync-replica.yaml.template b/tests/e2e/fixtures/sync_replicas/cluster-sync-replica.yaml.template index 8bb4e43906..bbdccd10fe 100644 --- a/tests/e2e/fixtures/sync_replicas/cluster-sync-replica.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/cluster-sync-replica.yaml.template @@ -17,6 +17,10 @@ spec: log_temp_files: '1024' log_autovacuum_min_duration: '1s' log_replication_commands: 'on' + probes: + readiness: + failureThreshold: 10 + periodSeconds: 1 storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/sync_replicas/preferred.yaml.template b/tests/e2e/fixtures/sync_replicas/preferred.yaml.template new file mode 100644 index 0000000000..737ae75d7b --- /dev/null +++ b/tests/e2e/fixtures/sync_replicas/preferred.yaml.template @@ -0,0 +1,28 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-sync-replica +spec: + instances: 3 + + postgresql: + synchronous: + method: any + number: 2 + dataDurability: preferred + parameters: + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + probes: + readiness: + failureThreshold: 10 + periodSeconds: 1 + + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1G diff --git a/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template b/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template new file mode 100644 index 0000000000..33075f2eac --- /dev/null +++ b/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template @@ -0,0 +1,31 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-readiness-probe-replica-lag-control +spec: + instances: 3 + + postgresql: + synchronous: + method: any + number: 2 + dataDurability: preferred + parameters: + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + 
log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + probes: + readiness: + type: streaming + maximumLag: 16Mi + failureThreshold: 15 + periodSeconds: 1 + + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1G diff --git a/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template b/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template new file mode 100644 index 0000000000..d25f256732 --- /dev/null +++ b/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template @@ -0,0 +1,34 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-startup-probe-replica-lag-control +spec: + instances: 3 + + postgresql: + synchronous: + method: any + number: 2 + dataDurability: preferred + parameters: + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + probes: + startup: + type: streaming + maximumLag: 16Mi + failureThreshold: 60 + periodSeconds: 1 + readiness: + failureThreshold: 10 + periodSeconds: 1 + + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1G diff --git a/tests/e2e/hibernation_test.go b/tests/e2e/hibernation_test.go deleted file mode 100644 index 29c1ab5b2f..0000000000 --- a/tests/e2e/hibernation_test.go +++ /dev/null @@ -1,383 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "encoding/json" - "fmt" - "strings" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/types" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), func() { - type mode string - type hibernateSatusMessage string - type expectedKeysInStatus string - const ( - sampleFileClusterWithPGWalVolume = fixturesDir + "/base/cluster-storage-class.yaml.template" - sampleFileClusterWithOutPGWalVolume = fixturesDir + "/hibernate/" + - "cluster-storage-class-without-wal.yaml.template" - level = tests.Medium - HibernateOn mode = "on" - HibernateOff mode = "off" - HibernateStatus mode = "status" - clusterOffStatusMessage hibernateSatusMessage = "No Hibernation. Cluster Deployed." 
- clusterOnStatusMessage hibernateSatusMessage = "Cluster Hibernated" - summaryInStatus expectedKeysInStatus = "summary" - tableName = "test" - ) - BeforeEach(func() { - if testLevelEnv.Depth < int(level) { - Skip("Test depth is lower than the amount requested for this test") - } - }) - - Context("hibernate", func() { - var namespace string - var err error - getPrimaryAndClusterManifest := func(namespace, clusterName string) ([]byte, string) { - var beforeHibernationClusterInfo *apiv1.Cluster - var clusterManifest []byte - var beforeHibernationCurrentPrimary string - By("collecting current primary details", func() { - beforeHibernationClusterInfo, err = env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - beforeHibernationCurrentPrimary = beforeHibernationClusterInfo.Status.CurrentPrimary - // collect expected cluster manifesto info - clusterManifest, err = json.Marshal(&beforeHibernationClusterInfo) - Expect(err).ToNot(HaveOccurred()) - }) - return clusterManifest, beforeHibernationCurrentPrimary - } - getPvc := func(role persistentvolumeclaim.Meta, instanceName string) corev1.PersistentVolumeClaim { - pvcName := role.GetName(instanceName) - pvcInfo := corev1.PersistentVolumeClaim{} - err = testsUtils.GetObject(env, ctrlclient.ObjectKey{Namespace: namespace, Name: pvcName}, &pvcInfo) - Expect(err).ToNot(HaveOccurred()) - return pvcInfo - } - performHibernation := func(mode mode, namespace, clusterName string) { - By(fmt.Sprintf("performing hibernation %v", mode), func() { - _, _, err := testsUtils.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v", - mode, clusterName, namespace)) - Expect(err).ToNot(HaveOccurred()) - }) - By(fmt.Sprintf("verifying cluster %v pods are removed", clusterName), func() { - Eventually(func(g Gomega) { - podList, _ := env.GetClusterPodList(namespace, clusterName) - g.Expect(len(podList.Items)).Should(BeEquivalentTo(0)) - }, 300).Should(Succeed()) - }) - } - - getHibernationStatusInJSON := func(namespace, clusterName string) map[string]interface{} { - var data map[string]interface{} - By("getting hibernation status", func() { - stdOut, _, err := testsUtils.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v -ojson", - HibernateStatus, clusterName, namespace)) - Expect(err).ToNot(HaveOccurred(), stdOut) - err = json.Unmarshal([]byte(stdOut), &data) - Expect(err).ToNot(HaveOccurred()) - }) - return data - } - - verifySummaryInHibernationStatus := func(clusterName string, message hibernateSatusMessage) { - statusOut := getHibernationStatusInJSON(namespace, clusterName) - actualStatus := statusOut[string(summaryInStatus)].(map[string]interface{})["status"].(string) - Expect(strings.Contains(string(message), actualStatus)).Should(BeEquivalentTo(true), - actualStatus+"\\not-contained-in\\"+string(message)) - } - verifyClusterResources := func(namespace, clusterName string, objs []persistentvolumeclaim.ExpectedObjectCalculator) { - By(fmt.Sprintf("verifying cluster resources are removed "+ - "post hibernation where roles %v", objs), func() { - timeout := 120 - - By(fmt.Sprintf("verifying cluster %v is removed", clusterName), func() { - Eventually(func() (bool, apiv1.Cluster) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return true, apiv1.Cluster{} - } - return false, *cluster - }, timeout).Should(BeTrue()) - }) - - By(fmt.Sprintf("verifying cluster %v PVCs are removed", clusterName), func() { - Eventually(func() (int, error) { - pvcList, err := env.GetPVCList(namespace) - if err != nil { - return -1, err - } - 
return len(pvcList.Items), nil - }, timeout).Should(BeEquivalentTo(len(objs))) - }) - - By(fmt.Sprintf("verifying cluster %v configMap is removed", clusterName), func() { - Eventually(func() (bool, corev1.ConfigMap) { - configMap := corev1.ConfigMap{} - err = env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: apiv1.DefaultMonitoringConfigMapName}, - &configMap) - if err != nil { - return true, corev1.ConfigMap{} - } - return false, configMap - }, timeout).Should(BeTrue()) - }) - - By(fmt.Sprintf("verifying cluster %v secrets are removed", clusterName), func() { - Eventually(func() (bool, corev1.SecretList, error) { - secretList := corev1.SecretList{} - err = env.Client.List(env.Ctx, &secretList, ctrlclient.InNamespace(namespace)) - if err != nil { - return false, corev1.SecretList{}, err - } - var getClusterSecrets []string - for _, secret := range secretList.Items { - if strings.HasPrefix(secret.GetName(), clusterName) { - getClusterSecrets = append(getClusterSecrets, secret.GetName()) - } - } - if len(getClusterSecrets) == 0 { - return true, corev1.SecretList{}, nil - } - return false, secretList, nil - }, timeout).Should(BeTrue()) - }) - - By(fmt.Sprintf("verifying cluster %v role is removed", clusterName), func() { - Eventually(func() (bool, v1.Role) { - role := v1.Role{} - err = env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: clusterName}, - &role) - if err != nil { - return true, v1.Role{} - } - return false, role - }, timeout).Should(BeTrue()) - }) - - By(fmt.Sprintf("verifying cluster %v rolebinding is removed", clusterName), func() { - Eventually(func() (bool, v1.RoleBinding) { - roleBinding := v1.RoleBinding{} - err = env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: clusterName}, - &roleBinding) - if err != nil { - return true, v1.RoleBinding{} - } - return false, roleBinding - }, timeout).Should(BeTrue()) - }) - }) - } - verifyPvc := func(expectedObject persistentvolumeclaim.ExpectedObjectCalculator, pvcUid types.UID, - clusterManifest []byte, instanceName string, - ) { - pvcInfo := getPvc(expectedObject, instanceName) - Expect(pvcUid).Should(BeEquivalentTo(pvcInfo.GetUID())) - // pvc should be attached annotation with pgControlData and Cluster manifesto - expectedAnnotationKeyPresent := []string{ - utils.HibernatePgControlDataAnnotationName, - utils.HibernateClusterManifestAnnotationName, - utils.PgControldataAnnotationName, - utils.ClusterManifestAnnotationName, - } - testsUtils.ObjectHasAnnotations(&pvcInfo, expectedAnnotationKeyPresent) - expectedAnnotation := map[string]string{ - utils.HibernateClusterManifestAnnotationName: string(clusterManifest), - utils.ClusterManifestAnnotationName: string(clusterManifest), - } - testsUtils.ObjectMatchesAnnotations(&pvcInfo, expectedAnnotation) - } - - assertHibernation := func(namespace, clusterName, tableName string) { - var beforeHibernationPgWalPvcUID types.UID - var beforeHibernationPgDataPvcUID types.UID - - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) - clusterManifest, currentPrimary := getPrimaryAndClusterManifest(namespace, clusterName) - - By("collecting pgWal pvc details of current primary", func() { - pvcInfo := getPvc(persistentvolumeclaim.NewPgWalCalculator(), currentPrimary) - beforeHibernationPgWalPvcUID = pvcInfo.GetUID() - }) - - By("collecting pgData pvc details of current primary", func() { - pvcInfo := getPvc(persistentvolumeclaim.NewPgDataCalculator(), 
currentPrimary) - beforeHibernationPgDataPvcUID = pvcInfo.GetUID() - }) - - By(fmt.Sprintf("verifying hibernation status"+ - " before hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) - }) - - performHibernation(HibernateOn, namespace, clusterName) - - By(fmt.Sprintf("verifying hibernation status"+ - " after hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOnStatusMessage) - }) - - // After hibernation, it will destroy all the resources generated by the cluster, - // except the PVCs that belong to the PostgreSQL primary instance. - verifyClusterResources( - namespace, - clusterName, - []persistentvolumeclaim.ExpectedObjectCalculator{ - persistentvolumeclaim.NewPgWalCalculator(), - persistentvolumeclaim.NewPgDataCalculator(), - }, - ) - - By("verifying primary pgWal pvc info", func() { - verifyPvc( - persistentvolumeclaim.NewPgWalCalculator(), - beforeHibernationPgWalPvcUID, - clusterManifest, - currentPrimary, - ) - }) - - By("verifying primary pgData pvc info", func() { - verifyPvc( - persistentvolumeclaim.NewPgDataCalculator(), - beforeHibernationPgDataPvcUID, - clusterManifest, - currentPrimary, - ) - }) - - // verifying hibernation off - performHibernation(HibernateOff, namespace, clusterName) - - By(fmt.Sprintf("verifying hibernation status after "+ - "perform hibernation off on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) - }) - - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) - // Test data should be present after hibernation off - AssertDataExpectedCount(namespace, clusterName, tableName, 2, psqlClientPod) - } - - When("cluster setup with PG-WAL volume", func() { - It("hibernation process should work", func() { - const namespacePrefix = "hibernation-on-with-pg-wal" - clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithPGWalVolume) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - AssertCreateCluster(namespace, clusterName, sampleFileClusterWithPGWalVolume, env) - assertHibernation(namespace, clusterName, tableName) - }) - }) - When("cluster setup without PG-WAL volume", func() { - It("hibernation process should work", func() { - var beforeHibernationPgDataPvcUID types.UID - - const namespacePrefix = "hibernation-without-pg-wal" - clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithOutPGWalVolume) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - AssertCreateCluster(namespace, clusterName, sampleFileClusterWithOutPGWalVolume, env) - // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) - clusterManifest, currentPrimary := getPrimaryAndClusterManifest(namespace, - clusterName) - - By("collecting pgData pvc details of current primary", func() { - pvcInfo := getPvc(persistentvolumeclaim.NewPgDataCalculator(), currentPrimary) - beforeHibernationPgDataPvcUID = pvcInfo.GetUID() - }) - - By(fmt.Sprintf("verifying hibernation status"+ - " before hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, 
clusterOffStatusMessage) - }) - - performHibernation(HibernateOn, namespace, clusterName) - - By(fmt.Sprintf("verifying hibernation status"+ - " after hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOnStatusMessage) - }) - - // After hibernation, it will destroy all the resources generated by the cluster, - // except the PVCs that belong to the PostgreSQL primary instance. - verifyClusterResources( - namespace, - clusterName, - []persistentvolumeclaim.ExpectedObjectCalculator{persistentvolumeclaim.NewPgDataCalculator()}, - ) - - By("verifying primary pgData pvc info", func() { - verifyPvc( - persistentvolumeclaim.NewPgDataCalculator(), - beforeHibernationPgDataPvcUID, - clusterManifest, - currentPrimary, - ) - }) - - // verifying hibernation off - performHibernation(HibernateOff, namespace, clusterName) - By(fmt.Sprintf("verifying hibernation status"+ - " before hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) - }) - - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) - // Test data should be present after hibernation off - AssertDataExpectedCount(namespace, clusterName, tableName, 2, psqlClientPod) - }) - }) - When("cluster hibernation after switchover", func() { - It("hibernation process should work", func() { - const namespacePrefix = "hibernation-with-switchover" - clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithPGWalVolume) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - AssertCreateCluster(namespace, clusterName, sampleFileClusterWithPGWalVolume, env) - AssertSwitchover(namespace, clusterName, env) - assertHibernation(namespace, clusterName, tableName) - }) - }) - }) -}) diff --git a/tests/e2e/imagevolume_extensions_test.go b/tests/e2e/imagevolume_extensions_test.go new file mode 100644 index 0000000000..c3f9430299 --- /dev/null +++ b/tests/e2e/imagevolume_extensions_test.go @@ -0,0 +1,234 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "path/filepath" + "strings" + "time" + + "github.com/blang/semver" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + postgresutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ImageVolume Extensions", Label(tests.LabelPostgresConfiguration), func() { + const ( + clusterManifest = fixturesDir + "/imagevolume_extensions/cluster-with-extensions.yaml.template" + databaseManifest = fixturesDir + "/imagevolume_extensions/database.yaml.template" + namespacePrefix = "cluster-imagevolume-extensions" + level = tests.Low + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsLocal() { + Skip("This test is only run on local cluster") + } + if env.PostgresVersion < 18 { + Skip("This test is only run on PostgreSQL v18 or greater") + } + // Require K8S 1.33 or greater + versionInfo, err := env.Interface.Discovery().ServerVersion() + Expect(err).NotTo(HaveOccurred()) + currentVersion, err := semver.Parse(strings.TrimPrefix(versionInfo.String(), "v")) + Expect(err).NotTo(HaveOccurred()) + k8s133, err := semver.Parse("1.33.0") + Expect(err).NotTo(HaveOccurred()) + if currentVersion.LT(k8s133) { + Skip("This test runs only on Kubernetes 1.33 or greater") + } + }) + var namespace, clusterName, databaseName string + var err error + + assertVolumeMounts := func(podList *corev1.PodList, imageVolumeExtension string) { + found := false + mountPath := filepath.Join(postgres.ExtensionsBaseDirectory, imageVolumeExtension) + for _, pod := range podList.Items { + for _, volumeMount := range pod.Spec.Containers[0].VolumeMounts { + if volumeMount.Name == imageVolumeExtension && volumeMount.MountPath == mountPath { + found = true + } + } + } + Expect(found).To(BeTrue()) + } + + assertVolumes := func(podList *corev1.PodList, imageVolumeExtension string) { + found := false + for _, pod := range podList.Items { + for _, volume := range pod.Spec.Volumes { + if volume.Name == imageVolumeExtension && volume.Image.Reference != "" { + found = true + } + } + } + Expect(found).To(BeTrue()) + } + + assertExtensions := func(namespace, databaseName string) { + database := &apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseName, + } + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, databaseNamespacedName, database) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + g.Expect(database.Status.Message).Should(BeEmpty()) + for _, extension := range database.Status.Extensions { + Expect(extension.Applied).Should(HaveValue(BeTrue())) + Expect(extension.Message).Should(BeEmpty()) + } + }, 60).WithPolling(10 * time.Second).Should(Succeed()) + } + + assertPostgis := func(namespace, clusterName string) { + row, err := postgresutils.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, clusterName, postgresutils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + "SELECT ST_AsText(geom) AS wkt, ST_Area(geom) AS area"+ + " FROM (SELECT ST_GeomFromText('POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))', 4326) AS geom) AS subquery;") + Expect(err).ToNot(HaveOccurred()) + + var wkt, area string + err = row.Scan(&wkt, &area) + Expect(err).ToNot(HaveOccurred()) + Expect(wkt).To(BeEquivalentTo("POLYGON((0 0,0 10,10 10,10 0,0 0))")) + Expect(area).To(BeEquivalentTo("100")) + } + + assertVector := func(namespace, clusterName string) { + row, err := postgresutils.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, clusterName, postgresutils.AppDBName, + 
apiv1.ApplicationUserSecretSuffix, + "SELECT"+ + " '[1, 2, 3]'::vector AS vec1,"+ + " '[4, 5, 6]'::vector AS vec2,"+ + " cosine_distance('[1, 2, 3]'::vector, '[4, 5, 6]'::vector) AS cosine_sim,"+ + " l2_distance('[1, 2, 3]'::vector, '[4, 5, 6]'::vector) AS l2_dist,"+ + " inner_product('[1, 2, 3]'::vector, '[4, 5, 6]'::vector) AS dot_product;") + Expect(err).ToNot(HaveOccurred()) + + var vec1, vec2, cosineDist, distance, dotProduct string + err = row.Scan(&vec1, &vec2, &cosineDist, &distance, &dotProduct) + Expect(err).ToNot(HaveOccurred()) + Expect(vec1).To(BeEquivalentTo("[1,2,3]")) + Expect(vec2).To(BeEquivalentTo("[4,5,6]")) + Expect(cosineDist).To(BeEquivalentTo("0.025368153802923787")) + Expect(distance).To(BeEquivalentTo("5.196152422706632")) + Expect(dotProduct).To(BeEquivalentTo("32")) + } + + It("can use ImageVolume extensions", func() { + By("creating the cluster", func() { + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) + Expect(err).ToNot(HaveOccurred()) + databaseName, err = yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest) + Expect(err).NotTo(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, clusterManifest, env) + CreateResourceFromFile(namespace, databaseManifest) + }) + + By("checking volumes and volumeMounts", func() { + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + assertVolumeMounts(podList, "postgis") + assertVolumes(podList, "postgis") + }) + + By("checking extensions have been created", func() { + assertExtensions(namespace, databaseName) + }) + + By("adding a new extension to an existing Cluster", func() { + database := &apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseName, + } + + Eventually(func(g Gomega) { + // Updating the Cluster + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).NotTo(HaveOccurred()) + cluster.Spec.PostgresConfiguration.Extensions = append(cluster.Spec.PostgresConfiguration.Extensions, + apiv1.ExtensionConfiguration{ + Name: "pgvector", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "ghcr.io/niccolofei/pgvector:18beta2-master-bullseye", // wokeignore:rule=master + }, + }) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + + // Updating the Database + err = env.Client.Get(env.Ctx, databaseNamespacedName, database) + g.Expect(err).ToNot(HaveOccurred()) + database.Spec.Extensions = append(database.Spec.Extensions, apiv1.ExtensionSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "vector", + Ensure: apiv1.EnsurePresent, + }, + }) + g.Expect(env.Client.Update(env.Ctx, database)).To(Succeed()) + }, 60, 5).Should(Succeed()) + + AssertClusterEventuallyReachesPhase(namespace, clusterName, + []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 30) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) + + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + assertVolumeMounts(podList, "pgvector") + assertVolumes(podList, "pgvector") + assertExtensions(namespace, databaseName) + }) + + By("verifying the extension's usage ", func() { + assertPostgis(namespace, clusterName) + assertVector(namespace, clusterName) + }) + }) +}) diff --git 
a/tests/e2e/initdb_test.go b/tests/e2e/initdb_test.go index d399e033af..5906d70a2a 100644 --- a/tests/e2e/initdb_test.go +++ b/tests/e2e/initdb_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -22,7 +25,8 @@ import ( "strings" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -47,19 +51,20 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f namespace, clusterName, tableName string, - dbName utils.DatabaseName, + dbName exec.DatabaseName, expectedCount int, ) { query := fmt.Sprintf("SELECT count(*) FROM %s", tableName) - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf( "querying the %s table in the %s database defined by postInit SQL", tableName, dbName), func() { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, }, dbName, @@ -88,7 +93,7 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "initdb-postqueries" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) CreateResourceFromFile(namespace, postInitSQLSecretRef) @@ -127,15 +132,16 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f "app", 10000) By("checking inside the database the default locale", func() { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, }, "postgres", - "select datcollate from pg_database where datname='template0'") + "select datcollate from pg_catalog.pg_database where datname='template0'") Expect(err).ToNot(HaveOccurred()) Expect(stdout, err).To(Equal("C\n")) }) @@ -154,22 +160,23 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "initdb-locale" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = 
env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, postInitSQLCluster, env) By("checking inside the database", func() { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, }, "postgres", - "select datcollate from pg_database where datname='template0'") + "select datcollate from pg_catalog.pg_database where datname='template0'") Expect(err).ToNot(HaveOccurred()) - Expect(stdout, err).To(Equal("en_US.utf8\n")) + Expect(strings.TrimSpace(stdout), err).To(Equal("C")) }) }) }) diff --git a/tests/e2e/logs_test.go b/tests/e2e/logs_test.go index 59a2f11337..905171b732 100644 --- a/tests/e2e/logs_test.go +++ b/tests/e2e/logs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -28,7 +31,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -50,23 +55,26 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { const sampleFile = fixturesDir + "/json_logs/cluster-json-logs.yaml.template" var namespaceErr error // Create a cluster in a namespace we'll delete after the test - namespace, namespaceErr = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, namespaceErr = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(namespaceErr).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) By("verifying the presence of possible logger values", func() { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) for _, pod := range podList.Items { // Gather pod logs in the form of a Json Array - logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, pod.GetName(), + ) Expect(err).NotTo(HaveOccurred(), "unable to parse json logs") Expect(logEntries).ToNot(BeEmpty(), "no logs found") // Logger field Assertions - isPgControlDataLoggerFound := testsUtils.HasLogger(logEntries, "pg_controldata") + isPgControlDataLoggerFound := logs.HasLogger(logEntries, "pg_controldata") Expect(isPgControlDataLoggerFound).To(BeTrue(), fmt.Sprintf("pg_controldata logger not found in pod %v logs", pod.GetName())) - isPostgresLoggerFound := testsUtils.HasLogger(logEntries, "postgres") + isPostgresLoggerFound := logs.HasLogger(logEntries, "postgres") Expect(isPostgresLoggerFound).To(BeTrue(), fmt.Sprintf("postgres logger not found in pod %v logs", pod.GetName())) } @@ -74,7 +82,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { By("verifying the format of error queries being logged", func() { errorTestQuery := "selecct 1\nwith newlines\n" - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) timeout := 300 for _, pod := range podList.Items { @@ -86,16 +94,19 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", errorTestQuery) return queryError - }, RetryTimeout, PollingTime).ShouldNot(BeNil()) + }, RetryTimeout, PollingTime).ShouldNot(Succeed()) // Eventually the error log line will be logged Eventually(func(g Gomega) bool { // Gather pod logs in the form of a Json Array - logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, pod.GetName(), + ) g.Expect(err).ToNot(HaveOccurred()) // Gather the record containing the wrong query result - return testsUtils.AssertQueryRecord( + return logs.AssertQueryRecord( logEntries, errorTestQuery, queryError.Error(), @@ -107,7 +118,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { By("verifying only the primary instance logs write queries", func() { errorTestQuery := "ccreate table test(var text)" - primaryPod, _ := env.GetClusterPrimary(namespace, clusterName) + primaryPod, _ := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) timeout := 300 var queryError error @@ -118,19 +129,22 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { *primaryPod, specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "app", 
"-tAc", errorTestQuery) return queryError - }, RetryTimeout, PollingTime).ShouldNot(BeNil()) + }, RetryTimeout, PollingTime).ShouldNot(Succeed()) // Expect the query to be eventually logged on the primary Eventually(func() (bool, error) { // Gather pod logs in the form of a Json Array - logEntries, err := testsUtils.ParseJSONLogs(namespace, primaryPod.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, primaryPod.GetName(), + ) if err != nil { GinkgoWriter.Printf("Error reported while gathering primary pod log %s\n", err.Error()) return false, err } // Gather the record containing the wrong query result - return testsUtils.AssertQueryRecord(logEntries, errorTestQuery, queryError.Error(), + return logs.AssertQueryRecord(logEntries, errorTestQuery, queryError.Error(), logpipe.LoggingCollectorRecordName), nil }, timeout).Should(BeTrue()) @@ -146,12 +160,15 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { // Expect the query not to be logged on replicas for _, pod := range podList.Items { // Gather pod logs in the form of a Json Array - logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, pod.GetName(), + ) Expect(err).NotTo(HaveOccurred()) Expect(logEntries).ToNot(BeEmpty()) // No record should be returned in this case - isQueryRecordContained := testsUtils.AssertQueryRecord( + isQueryRecordContained := logs.AssertQueryRecord( logEntries, queryError.Error(), errorTestQuery, @@ -164,18 +181,18 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { By("verifying pg_rewind logs after deleting the old primary pod", func() { // Force-delete the primary - currentPrimary, _ := env.GetClusterPrimary(namespace, clusterName) + currentPrimary, _ := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) quickDelete := &client.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - deletePodError := env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) + deletePodError := pods.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete) Expect(deletePodError).ToNot(HaveOccurred()) // Expect a new primary to be elected timeout := 180 Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { GinkgoWriter.Printf("Error reported while getting current primary %s\n", err.Error()) return "", err @@ -189,14 +206,17 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { Eventually(func() (bool, error) { // Gather pod logs in the form of a JSON slice - logEntries, err := testsUtils.ParseJSONLogs(namespace, currentPrimary.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, currentPrimary.GetName(), + ) if err != nil { GinkgoWriter.Printf("Error reported while getting the 'pg_rewind' logger in old primary %s, %s\n", currentPrimary, err.Error()) return false, err } // Expect pg_rewind logger to eventually be present on the old primary logs - return testsUtils.HasLogger(logEntries, "pg_rewind"), nil + return logs.HasLogger(logEntries, "pg_rewind"), nil }, timeout).Should(BeTrue()) }) }) @@ -221,10 +241,10 @@ var _ = Describe("JSON log output unit tests", Label(tests.LabelObservability), Expect(err).ToNot(HaveOccurred()) It("Can check valid logging_collector record for query", 
func() { Expect(parsedRecord).NotTo(BeNil()) - Expect(testsUtils.CheckRecordForQuery(parsedRecord, errorTestQuery, user, database, message)).To(BeTrue()) + Expect(logs.CheckRecordForQuery(parsedRecord, errorTestQuery, user, database, message)).To(BeTrue()) }) It("Can check valid logging_collector ", func() { Expect(parsedRecord).NotTo(BeNil()) - Expect(testsUtils.IsWellFormedLogForLogger(parsedRecord, "postgres")).To(BeTrue()) + Expect(logs.IsWellFormedLogForLogger(parsedRecord, "postgres")).To(BeTrue()) }) }) diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index c8ed83f364..5e3cb25f84 100644 --- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -24,14 +27,18 @@ import ( "github.com/lib/pq" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -56,6 +63,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Context("plain vanilla cluster", Ordered, func() { const ( namespacePrefix = "managed-roles" + secretName = "cluster-example-dante" username = "dante" appUsername = "app" password = "dante" @@ -64,47 +72,22 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic userWithPerpetualPass = "boccaccio" userWithHashedPassword = "cavalcanti" ) - var clusterName, secretName, namespace string - var secretNameSpacedName *types.NamespacedName + var clusterName, namespace string BeforeAll(func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(clusterManifest) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) - secretName = "cluster-example-dante" - secretNameSpacedName = &types.NamespacedName{ - Namespace: namespace, - Name: secretName, - } - By("setting up cluster with managed roles", func() { AssertCreateCluster(namespace, clusterName, clusterManifest, env) }) }) - assertUserExists := func(namespace, primaryPod, username string, shouldExists bool) { - Eventually(func(g Gomega) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPod, - }, - utils.DatabaseName("postgres"), - "\\du") - g.Expect(err).ToNot(HaveOccurred()) - if shouldExists { - g.Expect(stdout).To(ContainSubstring(username)) - } else { - g.Expect(stdout).NotTo(ContainSubstring(username)) - } - }, 60).Should(Succeed()) - } - assertInRoles := func(namespace, primaryPod, roleName string, expectedRoles []string) { slices.Sort(expectedRoles) Eventually(func() []string { @@ -112,16 +95,17 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic query := `SELECT mem.inroles FROM pg_catalog.pg_authid as auth LEFT JOIN ( - SELECT string_agg(pg_get_userbyid(roleid), ',') as inroles, member - FROM pg_auth_members GROUP BY member + SELECT string_agg(pg_catalog.pg_get_userbyid(roleid), ',') as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname =` + pq.QuoteLiteral(roleName) - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod, }, - utils.DatabaseName("postgres"), + postgres.PostgresDBName, query) if err != nil { return []string{ERROR} @@ -132,6 +116,23 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }, 30).Should(BeEquivalentTo(expectedRoles)) } + assertRoleStatus := func(namespace, clusterName, query, expectedResult string) { + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) { + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: namespace, + PodName: primaryPod.Name, + }, + postgres.PostgresDBName, + query) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.TrimSpace(stdout)).To(Equal(expectedResult)) + }, 30).Should(Succeed()) + } + 
It("can create roles specified in the managed roles stanza", func() { rolCanLoginInSpec := true rolSuperInSpec := false @@ -143,27 +144,34 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic rolConnLimitInSpec := 4 By("ensuring the roles created in the managed stanza are in the database with correct attributes", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - assertUserExists(namespace, primaryPodInfo.Name, username, true) - assertUserExists(namespace, primaryPodInfo.Name, userWithPerpetualPass, true) - assertUserExists(namespace, primaryPodInfo.Name, userWithHashedPassword, true) - assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, false) + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, + roleExistsQuery(username), "t"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, + roleExistsQuery(userWithPerpetualPass), "t"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, + roleExistsQuery(userWithHashedPassword), "t"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, + roleExistsQuery(unrealizableUser), "f"), 30).Should(Succeed()) - query := fmt.Sprintf("SELECT true FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ + query := fmt.Sprintf("SELECT true FROM pg_catalog.pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ - "and rolbypassrls=%v and rolconnlimit=%v", username, rolCanLoginInSpec, rolSuperInSpec, rolCreateDBInSpec, + "and rolbypassrls=%v and rolconnlimit=%v", username, rolCanLoginInSpec, rolSuperInSpec, + rolCreateDBInSpec, rolCreateRoleInSpec, rolInheritInSpec, rolReplicationInSpec, rolByPassRLSInSpec, rolConnLimitInSpec) - query2 := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_roles WHERE rolname='%s'", userWithPerpetualPass) + query2 := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_catalog.pg_roles WHERE rolname='%s'", + userWithPerpetualPass) for _, q := range []string{query, query2} { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, }, - utils.DatabaseName("postgres"), + postgres.PostgresDBName, q) Expect(err).ToNot(HaveOccurred()) Expect(stdout).To(Equal("t\n")) @@ -171,53 +179,43 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verifying connectivity of new managed role", func() { - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := services.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(rwService, username, "postgres", password, *psqlClientPod, 30, env) - - AssertConnection(rwService, userWithHashedPassword, "postgres", userWithHashedPassword, *psqlClientPod, 30, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, username, password, env) }) By("ensuring the app role has been granted createdb in the managed stanza", func() { - 
primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - assertUserExists(namespace, primaryPodInfo.Name, appUsername, true) + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName, + roleExistsQuery(appUsername), "t"), 30).Should(Succeed()) query := fmt.Sprintf("SELECT rolcreatedb and rolvaliduntil='infinity' "+ - "FROM pg_roles WHERE rolname='%s'", appUsername) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.DatabaseName("postgres"), - query) - Expect(err).ToNot(HaveOccurred()) - Expect(stdout).To(Equal("t\n")) + "FROM pg_catalog.pg_roles WHERE rolname='%s'", appUsername) + assertRoleStatus(namespace, clusterName, query, "t") }) By("verifying connectivity of app user", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) appUserSecret := corev1.Secret{} - err = utils.GetObject( - env, + err = objects.Get( + env.Ctx, env.Client, types.NamespacedName{Name: cluster.GetApplicationSecretName(), Namespace: namespace}, &appUserSecret, ) Expect(err).NotTo(HaveOccurred()) pass := string(appUserSecret.Data["password"]) - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := services.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(rwService, appUsername, "postgres", pass, *psqlClientPod, 30, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, appUsername, pass, env) }) By("Verify show unrealizable role configurations in the status", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Eventually(func() int { @@ -237,10 +235,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic expectedCreateDB := false expectedCreateRole := true expectedConnLmt := int64(10) - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := services.GetReadWriteServiceName(clusterName) By("updating role attribute in spec", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -253,39 +251,30 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify the role has been updated in the database", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - Eventually(func() string { - query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ - "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v", - username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.DatabaseName("postgres"), - query) - if err != nil { - return "" - } - return stdout - }, 30).Should(Equal("1\n")) + query := fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ + "and rolcreatedb=%v and 
rolcreaterole=%v and rolconnlimit=%v", + username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt) + assertRoleStatus(namespace, clusterName, query, "1") }) By("the connection should fail since we disabled the login", func() { - dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require", - rwService, username, "postgres", password) - timeout := time.Second * 10 - _, _, err := env.ExecCommand(env.Ctx, *psqlClientPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "SELECT 1") + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, rwService, postgres.PostgresDBName, username, password, + ) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + _, err = conn.Exec("SELECT 1") Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not permitted to log in")) }) By("enable Login again", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() updated.Spec.Managed.Roles[0].Login = true @@ -293,16 +282,22 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) }) + By("verifying Login is now enabled", func() { + expectedLogin = true + query := fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ + "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v", + username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt) + assertRoleStatus(namespace, clusterName, query, "1") + }) + By("the connectivity should be success again", func() { - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := services.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(rwService, username, "postgres", password, *psqlClientPod, 30, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, username, password, env) }) }) It("Can add role with all attribute omitted and verify it is default", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) const ( defaultRolCanLogin = false defaultRolSuper = false @@ -314,7 +309,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic defaultRolConnLimit = int64(-1) ) By("Add role new_role with all attribute omit", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -327,32 +322,20 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify new_role exists with all attribute default", func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ - "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ - "and rolbypassrls=%v and rolconnlimit=%v", newUserName, defaultRolCanLogin, - defaultRolSuper, defaultRolCreateDB, - defaultRolCreateRole, defaultRolInherit, defaultRolReplication, - defaultRolByPassRLS, defaultRolConnLimit) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: 
primaryPodInfo.Name, - }, - utils.DatabaseName("postgres"), - query) - if err != nil { - return "" - } - return stdout - }, 30).Should(Equal("1\n")) + query := fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ + "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ + "and rolbypassrls=%v and rolconnlimit=%v", newUserName, defaultRolCanLogin, + defaultRolSuper, defaultRolCreateDB, + defaultRolCreateRole, defaultRolInherit, defaultRolReplication, + defaultRolByPassRLS, defaultRolConnLimit) + + assertRoleStatus(namespace, clusterName, query, "1") }) }) It("Can update role comment and verify changes in db ", func() { By("Update comment for role new_role", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -368,56 +351,27 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) }) - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - By(fmt.Sprintf("Verify comments update in db for %s", newUserName), func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+ - " FROM pg_catalog.pg_authid WHERE rolname='%s'", - newUserName) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.DatabaseName("postgres"), - query) - if err != nil { - return ERROR - } - return stdout - }, 30).Should(Equal(fmt.Sprintf("This is user %s\n", newUserName))) + query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+ + " FROM pg_catalog.pg_authid WHERE rolname='%s'", + newUserName) + assertRoleStatus(namespace, clusterName, query, fmt.Sprintf("This is user %s", newUserName)) }) By(fmt.Sprintf("Verify comments update in db for %s", username), func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+ - " FROM pg_catalog.pg_authid WHERE rolname='%s'", - username) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.DatabaseName("postgres"), - query) - if err != nil { - return ERROR - } - return stdout - }, 30).Should(Equal("\n")) + query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+ + " FROM pg_catalog.pg_authid WHERE rolname='%s'", + username) + assertRoleStatus(namespace, clusterName, query, "") }) }) It("Can update role membership and verify changes in db ", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("Remove invalid parent role from unrealizableUser and verify user in database", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -429,15 +383,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) Eventually(func() int { - cluster, err := 
env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(0)) - assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, true) + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, + roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed()) }) By("Add role in InRole for role new_role and verify in database", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -451,16 +406,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic } err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) - Eventually(func() int { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return len(cluster.Status.ManagedRolesStatus.CannotReconcile) - }, 30).Should(Equal(0)) - assertInRoles(namespace, primaryPodInfo.Name, newUserName, []string{"postgres", username}) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile).To(BeEmpty()) + }, 30).Should(Succeed()) + assertInRoles(namespace, primaryPod.Name, newUserName, []string{"postgres", username}) }) By("Remove parent role from InRole for role new_role and verify in database", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -473,16 +428,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic } err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) - Eventually(func() int { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return len(cluster.Status.ManagedRolesStatus.CannotReconcile) - }, 30).Should(Equal(0)) - assertInRoles(namespace, primaryPodInfo.Name, newUserName, []string{username}) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile).To(BeEmpty()) + }, 30).Should(Succeed()) + assertInRoles(namespace, primaryPod.Name, newUserName, []string{username}) }) By("Mock the error for unrealizable User and verify user in database", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -494,81 +449,81 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) // user not changed - assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, true) - Eventually(func() int { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return len(cluster.Status.ManagedRolesStatus.CannotReconcile) - }, 30).Should(Equal(1)) - Eventually(func() int { - cluster, err := 
env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return len(cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser]) - }, 30).Should(Equal(1)) - Eventually(func() string { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser][0] - }, 30).Should(ContainSubstring(fmt.Sprintf("role \"%s\" is a member of role \"%s\"", - unrealizableUser, unrealizableUser))) + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, + roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed()) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile).To(HaveLen(1)) + }, 30).Should(Succeed()) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser]).To(HaveLen(1)) + }, 30).Should(Succeed()) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser][0]).To( + ContainSubstring(fmt.Sprintf("role \"%s\" is a member of role \"%s\"", + unrealizableUser, unrealizableUser))) + }, 30).Should(Succeed()) }) }) It("Can update role password in secrets and db and verify the connectivity", func() { + var err error newPassword := "ThisIsNew" - By("update password from secrets", func() { - var secret corev1.Secret - err := env.Client.Get(env.Ctx, *secretNameSpacedName, &secret) - Expect(err).ToNot(HaveOccurred()) - updated := secret.DeepCopy() - updated.Data["password"] = []byte(newPassword) - err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(&secret)) - Expect(err).ToNot(HaveOccurred()) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + By("update password from secrets", func() { + AssertUpdateSecret("password", newPassword, secretName, + namespace, clusterName, 30, env) }) By("Verify connectivity using changed password in secret", func() { - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := services.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(rwService, username, "postgres", newPassword, *psqlClientPod, 30, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, username, newPassword, env) }) By("Update password in database", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) query := fmt.Sprintf("ALTER ROLE %s WITH PASSWORD %s", username, pq.QuoteLiteral(newPassword)) - _, _, err = env.ExecQueryInInstancePod( - utils.PodLocator{ + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, - PodName: primaryPodInfo.Name, + PodName: primaryPod.Name, }, - utils.DatabaseName("postgres"), + postgres.PostgresDBName, query) Expect(err).ToNot(HaveOccurred()) }) - By("Verify password in secrets could still valid", func() { - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) - AssertConnection(rwService, username, 
"postgres", newPassword, *psqlClientPod, 60, env) + By("Verify password in secrets is still valid", func() { + rwService := services.GetReadWriteServiceName(clusterName) + AssertConnection(namespace, rwService, postgres.PostgresDBName, username, newPassword, env) }) }) It("Can update role password validUntil and verify in the database", func() { newValidUntilString := "2023-04-04T00:00:00.000000Z" By("Update comment for role new_role", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() for i, r := range updated.Spec.Managed.Roles { if r.Name == newUserName { - updated.Spec.Managed.Roles[i].ValidUntil = &v1.Time{} + updated.Spec.Managed.Roles[i].ValidUntil = &metav1.Time{} } if r.Name == username { tt, err := time.Parse(time.RFC3339Nano, newValidUntilString) Expect(err).ToNot(HaveOccurred()) - nt := v1.NewTime(tt) + nt := metav1.NewTime(tt) updated.Spec.Managed.Roles[i].ValidUntil = &nt } } @@ -577,53 +532,24 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) }) - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - By(fmt.Sprintf("Verify valid until is removed in db for %s", newUserName), func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_catalog.pg_authid"+ - " WHERE rolname='%s'", - newUserName) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.DatabaseName("postgres"), - query) - if err != nil { - return ERROR - } - return stdout - }).Should(Equal("t\n")) + query := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_catalog.pg_authid"+ + " WHERE rolname='%s'", + newUserName) + assertRoleStatus(namespace, clusterName, query, "t") }) By(fmt.Sprintf("Verify valid until update in db for %s", username), func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT rolvaliduntil='%s' FROM pg_catalog.pg_authid "+ - " WHERE rolname='%s'", - newValidUntilString, username) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.DatabaseName("postgres"), - query) - if err != nil { - return ERROR - } - return stdout - }, 30).Should(Equal("t\n")) + query := fmt.Sprintf("SELECT rolvaliduntil='%s' FROM pg_catalog.pg_authid "+ + " WHERE rolname='%s'", + newValidUntilString, username) + assertRoleStatus(namespace, clusterName, query, "t") }) }) It("Can drop role with ensure absent option", func() { By("Delete role new_role with EnsureOption ", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -637,9 +563,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify new_role not existed in db", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - assertUserExists(namespace, primaryPodInfo.Name, newUserName, false) + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, + roleExistsQuery(newUserName), "f"), 30).Should(Succeed()) }) }) 
}) diff --git a/tests/e2e/managed_services_test.go b/tests/e2e/managed_services_test.go index 68131aded9..7409707cb6 100644 --- a/tests/e2e/managed_services_test.go +++ b/tests/e2e/managed_services_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -27,7 +30,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -53,14 +58,14 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa It("should create and delete a rw managed service", func(ctx SpecContext) { const clusterManifest = fixturesDir + "/managed_services/cluster-managed-services-rw.yaml.template" const serviceName = "test-rw" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(clusterManifest) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("ensuring the service is created", func() { @@ -78,32 +83,32 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa By("ensuring the service is deleted when removed from the additional field", func() { Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{} return env.Client.Update(ctx, cluster) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) - AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ManagedServices], env) Eventually(func(g Gomega) { var serviceRW corev1.Service err = env.Client.Get(ctx, types.NamespacedName{Name: serviceName, Namespace: namespace}, &serviceRW) g.Expect(apierrs.IsNotFound(err)).To(BeTrue()) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) }) }) It("should properly handle disabledDefaultServices field", func(ctx SpecContext) { const clusterManifest = fixturesDir + 
"/managed_services/cluster-managed-services-no-default.yaml.template" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(clusterManifest) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) ro := specs.CreateClusterReadOnlyService(*cluster) @@ -124,45 +129,45 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa By("creating them when they are re-enabled", func() { Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{} return env.Client.Update(ctx, cluster) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) - AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ManagedServices], env) Eventually(func(g Gomega) { var service corev1.Service err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: rw.Name}, &service) g.Expect(err).ToNot(HaveOccurred()) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) Eventually(func(g Gomega) { var service corev1.Service err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: ro.Name}, &service) g.Expect(err).ToNot(HaveOccurred()) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) Eventually(func(g Gomega) { var service corev1.Service err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: r.Name}, &service) g.Expect(err).ToNot(HaveOccurred()) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) }) }) It("should properly handle replace update strategy", func(ctx SpecContext) { const clusterManifest = fixturesDir + "/managed_services/cluster-managed-services-replace-strategy.yaml.template" const serviceName = "test-rw" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(clusterManifest) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) var creationTimestamp metav1.Time @@ -185,11 +190,11 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa By("updating the service definition", func() { Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := 
clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.Additional[0].ServiceTemplate.ObjectMeta.Labels["new-label"] = "new" return env.Client.Update(ctx, cluster) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) }) By("expecting the service to be recreated", func() { @@ -200,7 +205,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa g.Expect(service.Labels["new-label"]).To(Equal("new")) g.Expect(service.UID).ToNot(Equal(uid)) g.Expect(service.CreationTimestamp).ToNot(Equal(creationTimestamp)) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) }) }) }) diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go index 34df963db0..d44dc12a9b 100644 --- a/tests/e2e/metrics_test.go +++ b/tests/e2e/metrics_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -21,6 +24,7 @@ import ( "regexp" "strconv" "strings" + "time" corev1 "k8s.io/api/core/v1" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,7 +32,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -92,29 +101,30 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { AssertGatherMetrics := func(namespacePrefix, clusterFile string) { // Create the cluster namespace - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 2, 1) - metricsClusterName, err := env.GetResourceNameFromYAML(clusterFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterFile) Expect(err).ToNot(HaveOccurred()) // Create the cluster AssertCreateCluster(namespace, metricsClusterName, clusterFile, env) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).NotTo(HaveOccurred()) // Check metrics on each pod By("ensuring metrics are correct on each pod", func() { - podList, err := env.GetClusterPodList(namespace, metricsClusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) // Gather metrics in each pod for _, pod := range podList.Items { By(fmt.Sprintf("checking metrics for pod: %s", pod.Name), func() { - out, err := utils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, + cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred(), "while getting pod metrics") expectedMetrics := buildExpectedMetrics(cluster, !specs.IsPodPrimary(pod)) assertIncludesMetrics(out, expectedMetrics) @@ -140,20 +150,20 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { It("can gather metrics with multiple target databases", func() { const namespacePrefix = "metrics-target-databases-e2e" - metricsClusterName, err := env.GetResourceNameFromYAML(clusterMetricsDBFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterMetricsDBFile) Expect(err).ToNot(HaveOccurred()) // Create the cluster namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCustomMetricsResourcesExist(namespace, customQueriesTargetDBSampleFile, 1, 1) // Create the cluster AssertCreateCluster(namespace, metricsClusterName, clusterMetricsDBFile, env) - AssertCreationOfTestDataForTargetDB(namespace, metricsClusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, metricsClusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, metricsClusterName, targetDBSecret, testTableName, psqlClientPod) + AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBSecret, testTableName) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) @@ -162,10 +172,10 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), 
func() { It("can gather default metrics details", func() { const clusterWithDefaultMetricsFile = fixturesDir + "/base/cluster-storage-class.yaml.template" const namespacePrefix = "default-metrics-details" - metricsClusterName, err := env.GetResourceNameFromYAML(clusterWithDefaultMetricsFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithDefaultMetricsFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, metricsClusterName, clusterWithDefaultMetricsFile, env) @@ -181,30 +191,32 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { return err }, 10).ShouldNot(HaveOccurred()) }) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) - collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), true) + collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), + true) }) It("can gather metrics depending on the predicate query", func() { // Create the cluster namespace const namespacePrefix = "predicate-query-metrics-e2e" - metricsClusterName, err := env.GetResourceNameFromYAML(clusterMetricsPredicateQueryFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterMetricsPredicateQueryFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - AssertCustomMetricsResourcesExist(namespace, fixturesDir+"/metrics/custom-queries-with-predicate-query.yaml", 1, 0) + AssertCustomMetricsResourcesExist(namespace, fixturesDir+"/metrics/custom-queries-with-predicate-query.yaml", 1, + 0) // Create the cluster AssertCreateCluster(namespace, metricsClusterName, clusterMetricsPredicateQueryFile, env) By("ensuring only metrics with a positive predicate are collected", func() { - podList, err := env.GetClusterPodList(namespace, metricsClusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) // We expect only the metrics that have a predicate_query valid. 
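For context, a predicate_query gates the collection of a custom metric: the main query runs only when the predicate returns a single row with a single boolean column set to true. A hypothetical entry of the kind this fixture exercises, wrapped as a Go raw string (the shape is assumed, not copied from custom-queries-with-predicate-query.yaml):

	const predicateGatedEntry = `
pg_version_gated:
  predicate_query: "SELECT current_setting('server_version_num')::int >= 160000"
  query: "SELECT count(*) AS rows FROM my_table"
  metrics:
    - rows:
        usage: "GAUGE"
        description: "Row count of my_table"
`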
@@ -223,7 +235,8 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { // Gather metrics in each pod for _, pod := range podList.Items { By(fmt.Sprintf("checking metrics for pod: %s", pod.Name), func() { - out, err := utils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, + cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred(), "while getting pod metrics") assertIncludesMetrics(out, expectedMetrics) assertExcludesMetrics(out, nonCollectableMetrics) @@ -237,18 +250,19 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { const defaultMonitoringQueriesDisableSampleFile = fixturesDir + "/metrics/cluster-disable-default-metrics.yaml.template" const namespacePrefix = "disable-default-metrics" - metricsClusterName, err := env.GetResourceNameFromYAML(defaultMonitoringQueriesDisableSampleFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, defaultMonitoringQueriesDisableSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create the cluster AssertCreateCluster(namespace, metricsClusterName, defaultMonitoringQueriesDisableSampleFile, env) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) - collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), false) + collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), + false) }) It("execute custom queries against the application database on replica clusters", func() { @@ -263,15 +277,15 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { ) // Fetching the source cluster name - srcClusterName, err := env.GetResourceNameFromYAML(srcClusterSampleFile) + srcClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, srcClusterSampleFile) Expect(err).ToNot(HaveOccurred()) // Fetching replica cluster name - replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSampleFile) + replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleFile) Expect(err).ToNot(HaveOccurred()) // create namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating and verifying custom queries configmap @@ -287,37 +301,65 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { srcClusterDatabaseName, replicaClusterSampleFile, testTableName, - psqlClientPod) + ) By(fmt.Sprintf("grant select permission for %v table to pg_monitor", testTableName), func() { - cmd := fmt.Sprintf("GRANT SELECT ON %v TO pg_monitor", testTableName) - appUser, appUserPass, err := utils.GetCredentials(srcClusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - host, err := utils.GetHostName(namespace, srcClusterName, env) - Expect(err).ToNot(HaveOccurred()) - _, _, err = utils.RunQueryFromPod( - psqlClientPod, - host, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + 
srcClusterName, srcClusterDatabaseName, - appUser, - appUserPass, - cmd, - env) + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + cmd := fmt.Sprintf("GRANT SELECT ON %v TO pg_monitor", testTableName) + _, err = conn.Exec(cmd) Expect(err).ToNot(HaveOccurred()) }) - replicaCluster, err := env.GetCluster(namespace, replicaClusterName) + replicaCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, replicaClusterName) Expect(err).ToNot(HaveOccurred()) By("collecting metrics on each pod and checking that the table has been found", func() { - podList, err := env.GetClusterPodList(namespace, replicaClusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, replicaClusterName) Expect(err).ToNot(HaveOccurred()) // Gather metrics in each pod expectedMetric := fmt.Sprintf("cnpg_%v_row_count 3", testTableName) for _, pod := range podList.Items { - out, err := utils.RetrieveMetricsFromInstance(env, pod, replicaCluster.IsMetricsTLSEnabled()) - Expect(err).Should(Not(HaveOccurred())) - Expect(strings.Split(out, "\n")).Should(ContainElement(expectedMetric)) + // Wait a few seconds for the GRANT to be replicated + Eventually(func(g Gomega) { + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + srcClusterDatabaseName, + fmt.Sprintf( + "SELECT has_table_privilege('pg_monitor', '%v', 'SELECT')", + testTableName, + ), + ) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.TrimSpace(out)).To(BeEquivalentTo("t")) + }).WithTimeout( + time.Duration(timeouts.DefaultTestTimeouts[timeouts.Short])*time.Second, + ).Should(Succeed(), fmt.Sprintf("on pod %v", pod.Name)) + + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, + replicaCluster.IsMetricsTLSEnabled()) + Expect(err).ShouldNot(HaveOccurred(), + fmt.Sprintf("while getting pod metrics for pod: %v", pod.Name)) + Expect(strings.Split(out, "\n")).Should(ContainElement(expectedMetric), + fmt.Sprintf("expected metric %v not found in pod %v", expectedMetric, pod.Name)) } }) diff --git a/tests/e2e/monitoring_test.go b/tests/e2e/monitoring_test.go index 6fabfaf38d..415e88694e 100644 --- a/tests/e2e/monitoring_test.go +++ b/tests/e2e/monitoring_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,13 +13,22 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/types" + k8client "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -26,6 +36,24 @@ import ( // Set of tests that set up a cluster with monitoring support enabled var _ = Describe("PodMonitor support", Serial, Label(tests.LabelObservability), func() { + getPodMonitorFunc := func( + ctx context.Context, + crudClient k8client.Client, + namespace, name string, + ) (*monitoringv1.PodMonitor, error) { + podMonitor := &monitoringv1.PodMonitor{} + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + + err := objects.Get(ctx, crudClient, namespacedName, podMonitor) + if err != nil { + return nil, err + } + return podMonitor, nil + } + const ( namespacePrefix = "cluster-monitoring-e2e" level = tests.Medium @@ -53,13 +81,13 @@ var _ = Describe("PodMonitor support", Serial, Label(tests.LabelObservability), }) It("sets up a cluster enabling PodMonitor feature", func() { - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterDefaultName, clusterDefaultMonitoringFile, env) By("verifying PodMonitor existence", func() { - podMonitor, err := env.GetPodMonitor(namespace, clusterDefaultName) + podMonitor, err := getPodMonitorFunc(env.Ctx, env.Client, namespace, clusterDefaultName) Expect(err).ToNot(HaveOccurred()) endpoints := podMonitor.Spec.PodMetricsEndpoints diff --git a/tests/e2e/nodeselector_test.go b/tests/e2e/nodeselector_test.go index 761b3ecfe3..a771194863 100644 --- a/tests/e2e/nodeselector_test.go +++ b/tests/e2e/nodeselector_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -24,7 +27,9 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -48,7 +53,7 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { // We create a namespace and verify it exists By(fmt.Sprintf("having a %v namespace", namespace), func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating a namespace should be quick @@ -74,10 +79,10 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { // We check the error to verify that's the case By("verifying that the pods can't be scheduled", func() { timeout := 120 - Eventually(func() bool { + Eventually(func(g Gomega) { isPending := false - podList, err := env.GetPodList(namespace) - Expect(err).ToNot(HaveOccurred()) + podList, err := pods.List(env.Ctx, env.Client, namespace) + g.Expect(err).ToNot(HaveOccurred()) if len(podList.Items) > 0 { if len(podList.Items[0].Status.Conditions) > 0 { if podList.Items[0].Status.Phase == "Pending" && strings.Contains(podList.Items[0].Status.Conditions[0].Message, @@ -91,8 +96,8 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { } } } - return isPending - }, timeout).Should(BeEquivalentTo(true)) + g.Expect(isPending).To(BeTrue()) + }, timeout).Should(Succeed()) }) }) }) @@ -107,13 +112,13 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { var nodeName string var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // We label one node with the label we have defined in the cluster // YAML definition By("labelling a node", func() { - nodeList, err := env.GetNodeList() + nodeList, err := nodes.List(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) // We want to label a node that is uncordoned and untainted, @@ -121,19 +126,19 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { for _, nodeDetails := range nodeList.Items { if (nodeDetails.Spec.Unschedulable != true) && (len(nodeDetails.Spec.Taints) == 0) { - nodeName = nodeDetails.ObjectMeta.Name + nodeName = nodeDetails.Name break } } cmd := fmt.Sprintf("kubectl label node %v nodeselectortest=exists --overwrite", nodeName) - _, _, err = utils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) // All the pods should be running on the labeled node By("confirm pods run on the labelled node", func() { AssertCreateCluster(namespace, clusterName, sampleFile, env) - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) for _, podDetails := range podList.Items { if podDetails.Status.Phase == "Running" { diff --git a/tests/e2e/openshift_upgrade_test.go b/tests/e2e/openshift_upgrade_test.go index 3305128b8f..0ff9997137 100644 --- a/tests/e2e/openshift_upgrade_test.go +++ b/tests/e2e/openshift_upgrade_test.go @@ -1,25 +1,37 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ + package e2e import ( + "fmt" + "regexp" + "strings" + "github.com/blang/semver" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/openshift" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -53,21 +65,21 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere ocp412, err = semver.Make("4.12.0") Expect(err).ToNot(HaveOccurred()) // Get current OpenShift Versions - ocpVersion, err = testsUtils.GetOpenshiftVersion(env) + ocpVersion, err = openshift.GetOpenshiftVersion(env.Ctx, env.RestClientConfig) Expect(err).ToNot(HaveOccurred()) }) cleanupOperator := func() error { // Cleanup the Operator - err = testsUtils.DeleteOperatorCRDs(env) + err = openshift.DeleteOperatorCRDs(env.Ctx, env.Client) if err != nil { return err } - err = testsUtils.DeleteSubscription(env) + err = openshift.DeleteSubscription(env.Ctx, env.Client) if err != nil { return err } - err = testsUtils.DeleteCSV(env) + err = openshift.DeleteCSV(env.Ctx, env.Client) if err != nil { return err } @@ -78,7 +90,7 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere err := cleanupOperator() Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - _, err = env.GetOperatorPod() + _, err = operator.GetPod(env.Ctx, env.Client) return err }, 120).Should(HaveOccurred()) } @@ -86,21 +98,21 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere assertClusterIsAligned := func(namespace, clusterName string) { By("Verifying the cluster pods have been upgraded", func() { Eventually(func() bool { - return testsUtils.HasOperatorBeenUpgraded(env) + return operator.HasBeenUpgraded(env.Ctx, env.Client) }).Should(BeTrue()) - operatorPodName, err := testsUtils.GetOperatorPodName(env) + operatorPodName, err := operator.GetPodName(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) - expectedVersion, err := testsUtils.GetOperatorVersion("openshift-operators", operatorPodName) + expectedVersion, err := operator.Version("openshift-operators", operatorPodName) Expect(err).ToNot(HaveOccurred()) - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { Eventually(func() (string, error) { - return testsUtils.GetManagerVersion(namespace, pod.Name) + return GetManagerVersion(namespace, pod.Name) }, 300).Should(BeEquivalentTo(expectedVersion)) } }) @@ -110,13 +122,14 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere // Apply a subscription in the openshift-operators namespace. 
// This should create the operator By("Applying the initial subscription", func() { - err := testsUtils.CreateSubscription(env, initialSubscription) + err := openshift.CreateSubscription(env.Ctx, env.Client, initialSubscription) Expect(err).ToNot(HaveOccurred()) - AssertOperatorIsReady() + Expect(operator.WaitForReady(env.Ctx, env.Client, uint(testTimeouts[timeouts.OperatorIsReady]), + true)).Should(Succeed()) }) // Gather the version and semantic Versions of the operator - currentVersion, err := testsUtils.GetSubscriptionVersion(env) + currentVersion, err := openshift.GetSubscriptionVersion(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) currentSemVersion, err := semver.Make(currentVersion) Expect(err).ToNot(HaveOccurred()) @@ -124,27 +137,28 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere Expect(err).ToNot(HaveOccurred()) // Create a Cluster in a namespace we'll delete at the end - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) By("Patching the status condition if required", func() { // Patch the status conditions if we are running on a pre new-policy release if currentSemVersion.LT(newPolicyRelease) { - err = testsUtils.PatchStatusCondition(namespace, clusterName, env) + err = openshift.PatchStatusCondition(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) } }) By("Applying the upgrade subscription", func() { // Apply the new subscription to upgrade to a new version of the operator - err = testsUtils.UpgradeSubscription(env, upgradeSubscription) + err = openshift.UpgradeSubscription(env.Ctx, env.Client, upgradeSubscription) Expect(err).ToNot(HaveOccurred()) Eventually(func() (string, error) { - return testsUtils.GetSubscriptionVersion(env) + return openshift.GetSubscriptionVersion(env.Ctx, env.Client) }, 300). 
ShouldNot(BeEquivalentTo(currentVersion)) - AssertOperatorIsReady() + Expect(operator.WaitForReady(env.Ctx, env.Client, uint(testTimeouts[timeouts.OperatorIsReady]), + true)).Should(Succeed()) }) // Check if the upgrade was successful by making sure all the pods @@ -152,7 +166,7 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere assertClusterIsAligned(namespace, clusterName) } - It("stable-v1 to alpha, currently version 1.22", func() { + It("stable-v1 to alpha", func() { if ocpVersion.GT(ocp412) { Skip("This test runs only on OCP 4.12 or lower") } @@ -160,3 +174,18 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere applyUpgrade("stable-v1", "alpha") }) }) + +// GetManagerVersion returns the current manager version of a given pod +func GetManagerVersion(namespace, podName string) (string, error) { + out, _, err := run.Unchecked(fmt.Sprintf( + "kubectl -n %v exec %v -c postgres -- /controller/manager version", + namespace, + podName, + )) + if err != nil { + return "", err + } + versionRegexp := regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`) + ver := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))[1] + return ver, nil +} diff --git a/tests/e2e/operator_deployment_test.go b/tests/e2e/operator_deployment_test.go index 9b121ccfbc..17e1ff6c77 100644 --- a/tests/e2e/operator_deployment_test.go +++ b/tests/e2e/operator_deployment_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,16 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -34,10 +39,11 @@ var _ = Describe("PostgreSQL operator deployment", Label(tests.LabelBasic, tests It("sets up the operator", func() { By("having a pod for the operator in state ready", func() { - AssertOperatorIsReady() + Expect(operator.WaitForReady(env.Ctx, env.Client, uint(testTimeouts[timeouts.OperatorIsReady]), + true)).Should(Succeed()) }) By("having a deployment for the operator in state ready", func() { - ready, err := env.IsOperatorDeploymentReady() + ready, err := operator.IsReady(env.Ctx, env.Client, true) Expect(err).ToNot(HaveOccurred()) Expect(ready).To(BeTrue()) }) diff --git a/tests/e2e/operator_ha_test.go b/tests/e2e/operator_ha_test.go index 878f93df9d..6eb1a78509 100644 --- a/tests/e2e/operator_ha_test.go +++ b/tests/e2e/operator_ha_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -21,7 +24,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -49,11 +54,11 @@ var _ = Describe("Operator High Availability", Serial, It("can work as HA mode", func() { // Make sure there's at least one pod of the operator - err := env.ScaleOperatorDeployment(1) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) // Get Operator Pod name - operatorPodName, err := env.GetOperatorPod() + operatorPodName, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) By("having an operator already running", func() { @@ -64,11 +69,11 @@ var _ = Describe("Operator High Availability", Serial, }) // Get operator namespace - operatorNamespace, err := env.GetOperatorNamespaceName() + operatorNamespace, err := operator.NamespaceName(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) // Create the cluster namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create Cluster @@ -76,18 +81,20 @@ var _ = Describe("Operator High Availability", Serial, By("verifying current leader", func() { // Check for the current Operator Pod leader from ConfigMap - Expect(testsUtils.GetLeaderInfoFromLease(operatorNamespace, env)).To(HavePrefix(operatorPodName.GetName())) + Expect(operator.GetLeaderInfoFromLease( + env.Ctx, env.Interface, + operatorNamespace)).To(HavePrefix(operatorPodName.GetName())) }) By("scale up operator replicas to 3", func() { // Set old leader pod name to operator pod name oldLeaderPodName = operatorPodName.GetName() - err := env.ScaleOperatorDeployment(3) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 3) Expect(err).ToNot(HaveOccurred()) // Gather pod names from operator deployment - podList, err := env.GetPodList(operatorNamespace) + podList, err := podutils.List(env.Ctx, env.Client, operatorNamespace) Expect(err).ToNot(HaveOccurred()) for _, podItem := range podList.Items { operatorPodNames = append(operatorPodNames, podItem.GetName()) @@ -97,7 +104,9 @@ var _ = Describe("Operator High Availability", Serial, By("verifying leader information after scale up", func() { // Check for Operator Pod leader from ConfigMap to be the former one Eventually(func() (string, error) { - return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env) + return operator.GetLeaderInfoFromLease( + env.Ctx, env.Interface, + operatorNamespace) }, 60).Should(HavePrefix(oldLeaderPodName)) }) @@ -106,12 +115,12 @@ var _ = Describe("Operator High Availability", Serial, quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(operatorNamespace, oldLeaderPodName, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, operatorNamespace, oldLeaderPodName, quickDelete) Expect(err).ToNot(HaveOccurred()) // Verify operator pod should have been deleted Eventually(func() []string { - podList, err := 
env.GetPodList(operatorNamespace) + podList, err := podutils.List(env.Ctx, env.Client, operatorNamespace) Expect(err).ToNot(HaveOccurred()) var podNames []string for _, podItem := range podList.Items { @@ -124,13 +133,15 @@ var _ = Describe("Operator High Availability", Serial, By("new leader should be configured", func() { // Verify that the leader name is different from the previous one Eventually(func() (string, error) { - return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env) + return operator.GetLeaderInfoFromLease( + env.Ctx, env.Interface, + operatorNamespace) }, 120).ShouldNot(HavePrefix(oldLeaderPodName)) }) By("verifying reconciliation", func() { // Get current CNPG cluster's Primary - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPrimary := currentPrimary.GetName() @@ -138,7 +149,7 @@ var _ = Describe("Operator High Availability", Serial, quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete) Expect(err).ToNot(HaveOccurred()) // Expect a new primary to be elected and promoted @@ -147,18 +158,20 @@ var _ = Describe("Operator High Availability", Serial, By("scale down operator replicas to 1", func() { // Scale down operator deployment to one replica - err := env.ScaleOperatorDeployment(1) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) }) By("verifying leader information after scale down", func() { // Get Operator Pod name - operatorPodName, err := env.GetOperatorPod() + operatorPodName, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) // Verify the Operator Pod is the leader Eventually(func() (string, error) { - return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env) + return operator.GetLeaderInfoFromLease( + env.Ctx, env.Interface, + operatorNamespace) }, 120).Should(HavePrefix(operatorPodName.GetName())) }) }) diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index fb09f0aa8a..4b3b635cae 100644 --- a/tests/e2e/operator_unavailable_test.go +++ b/tests/e2e/operator_unavailable_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,27 +13,36 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( "sync" + "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) // Set of tests in which we test the concurrent disruption of both the primary // and the operator pods, asserting that the latter is able to perform a pending failover once a new operator pod comes back available. var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, tests.LabelOperator), func() { const ( @@ -53,18 +63,22 @@ It("can survive operator failures", func() { var err error // Create the cluster namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) // Load test data currentPrimary := clusterName + "-1" - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) By("scaling down operator replicas to zero", func() { - err := env.ScaleOperatorDeployment(0) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 0) Expect(err).ToNot(HaveOccurred()) }) @@ -73,20 +87,20 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(namespace, currentPrimary, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary, quickDelete) Expect(err).ToNot(HaveOccurred()) // Expect only 2 instances to be up and running - Eventually(func() int32 { + Eventually(func(g Gomega) { podList := &corev1.PodList{} err := env.Client.List( env.Ctx, podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.ClusterLabelName: clusterName}, ) - Expect(err).ToNot(HaveOccurred()) - return int32(len(podList.Items)) - }, 120).Should(BeEquivalentTo(2)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(podList.Items).To(HaveLen(2)) + }, 120).Should(Succeed()) // And to stay like that Consistently(func() int32 { @@ -103,7 +117,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te By("scaling up the operator replicas to 1", func() { // Scale up operator deployment to one replica - err := env.ScaleOperatorDeployment(1) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) }) @@ -122,10 +136,7 @@ var _ = Describe("Operator unavailable", Serial,
Label(tests.LabelDisruptive, te return specs.IsPodStandby(pod), err }, timeout).Should(BeTrue()) }) - // Expect the test data previously created to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertDataExpectedCount(env, tableLocator, 2) }) }) @@ -136,17 +147,21 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te var operatorPodName string var err error // Create the cluster namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) // Load test data currentPrimary := clusterName + "-1" - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) - - operatorNamespace, err := env.GetOperatorNamespaceName() + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) + + operatorNamespace, err := operator.NamespaceName(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) By("deleting primary and operator pod", func() { @@ -154,7 +169,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te podList := &corev1.PodList{} err := env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(operatorNamespace)) Expect(err).ToNot(HaveOccurred()) - operatorPodName = podList.Items[0].ObjectMeta.Name + operatorPodName = podList.Items[0].Name // Force-delete the operator and the primary quickDelete := &ctrlclient.DeleteOptions{ @@ -165,25 +180,25 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te wg.Add(1) wg.Add(1) go func() { - _ = env.DeletePod(operatorNamespace, operatorPodName, quickDelete) + _ = podutils.Delete(env.Ctx, env.Client, operatorNamespace, operatorPodName, quickDelete) wg.Done() }() go func() { - _ = env.DeletePod(namespace, currentPrimary, quickDelete) + _ = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary, quickDelete) wg.Done() }() wg.Wait() // Expect only 2 instances to be up and running - Eventually(func() int32 { + Eventually(func(g Gomega) { podList := &corev1.PodList{} err := env.Client.List( env.Ctx, podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.ClusterLabelName: "operator-unavailable"}, ) - Expect(err).ToNot(HaveOccurred()) - return int32(len(utils.FilterActivePods(podList.Items))) - }, 120).Should(BeEquivalentTo(2)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(utils.FilterActivePods(podList.Items)).To(HaveLen(2)) + }, 120).Should(Succeed()) }) By("verifying a new operator pod is now back", func() { @@ -196,7 +211,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te g.Expect(podList.Items[0].Name).NotTo(BeEquivalentTo(operatorPodName)) }, timeout).Should(Succeed()) Eventually(func() (bool, error) { - return env.IsOperatorDeploymentReady() + return operator.IsReady(env.Ctx, env.Client, true) }, timeout).Should(BeTrue()) }) @@ -215,10 +230,21 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te return specs.IsPodStandby(pod), err }, timeout).Should(BeTrue()) }) - // Expect the test data previously created 
to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertDataExpectedCount(env, tableLocator, 2) + + // There is a chance that the webhook is not able to reach the new operator pod yet. + // This could make following tests fail, so we need to wait for the webhook to be working again. + By("verifying the webhook is working again", func() { + invalidCluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "invalid"}, + Spec: apiv1.ClusterSpec{Instances: 1}, + } + Eventually(func(g Gomega) { + err := env.Client.Create(env.Ctx, invalidCluster) + g.Expect(errors.IsInvalid(err)).To(BeTrue()) + g.Expect(err).To(MatchError(ContainSubstring("spec.storage.size"))) + }).WithTimeout(10 * time.Second).Should(Succeed()) + }) }) }) }) diff --git a/tests/e2e/pg_basebackup_test.go b/tests/e2e/pg_basebackup_test.go index a364578c9f..7a589859a6 100644 --- a/tests/e2e/pg_basebackup_test.go +++ b/tests/e2e/pg_basebackup_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -19,7 +22,9 @@ package e2e import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -46,28 +51,34 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun Context("can bootstrap via pg_basebackup", Ordered, func() { BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create the source Cluster - srcClusterName, err = env.GetResourceNameFromYAML(srcCluster) + srcClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, srcCluster) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, srcClusterName, srcCluster, env) - AssertCreateTestData(namespace, srcClusterName, tableName, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) }) It("using basic authentication", func() { // Create the destination Cluster - dstClusterName, err := env.GetResourceNameFromYAML(dstClusterBasic) + dstClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, dstClusterBasic) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, dstClusterName, dstClusterBasic, env) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, dstClusterName, testTimeouts[utils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, dstClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) secretName := dstClusterName + apiv1.ApplicationUserSecretSuffix By("checking the dst cluster with auto generated app password connectable", func() { AssertApplicationDatabaseConnection(namespace, dstClusterName, - appUser, utils.AppDBName, "", secretName, psqlClientPod) + appUser, postgres.AppDBName, "", secretName) }) By("update user application password for dst cluster and verify connectivity", func() { @@ -77,43 +88,96 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun namespace, dstClusterName, appUser, - utils.AppDBName, + postgres.AppDBName, newPassword, - secretName, - psqlClientPod) + secretName) }) By("checking data have been copied correctly", func() { - AssertDataExpectedCount(namespace, dstClusterName, tableName, 2, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: dstClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) By("writing some new data to the dst cluster", func() { - insertRecordIntoTable(namespace, dstClusterName, tableName, 3, psqlClientPod) + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + dstClusterName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable(tableName, 3, conn) }) By("checking the src cluster was not modified", func() { - AssertDataExpectedCount(namespace, srcClusterName, tableName, 2, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) It("using TLS authentication", func() { // Create the destination Cluster - dstClusterName, err := 
env.GetResourceNameFromYAML(dstClusterTLS) + dstClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, dstClusterTLS) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, dstClusterName, dstClusterTLS, env) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, dstClusterName, testTimeouts[utils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, dstClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) By("checking data have been copied correctly", func() { - AssertDataExpectedCount(namespace, dstClusterName, tableName, 2, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: dstClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) By("writing some new data to the dst cluster", func() { - insertRecordIntoTable(namespace, dstClusterName, tableName, 3, psqlClientPod) + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + dstClusterName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable(tableName, 3, conn) }) By("checking the src cluster was not modified", func() { - AssertDataExpectedCount(namespace, srcClusterName, tableName, 2, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) diff --git a/tests/e2e/pg_data_corruption_test.go b/tests/e2e/pg_data_corruption_test.go index 9d2ddc9e8c..e70ba116b7 100644 --- a/tests/e2e/pg_data_corruption_test.go +++ b/tests/e2e/pg_data_corruption_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -27,7 +30,13 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -44,7 +53,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( Skip("Test depth is lower than the amount requested for this test") } var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) }) @@ -55,13 +64,19 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( var oldPrimaryPodName, oldPrimaryPVCName string var err error tableName := "test_pg_data_corruption" - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) By("gathering current primary pod and pvc", func() { - oldPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) + oldPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPrimaryPodName = oldPrimaryPod.GetName() // Get the PVC related to the pod @@ -78,8 +93,9 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( By("corrupting primary pod by removing PGDATA", func() { cmd := fmt.Sprintf("find %v/base/* -type f -delete", specs.PgDataPath) - _, _, err = env.ExecCommandInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: oldPrimaryPodName, }, nil, @@ -89,7 +105,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( By("verifying failover happened after the primary pod PGDATA got corrupted", func() { Eventually(func() (string, error) { - newPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) + newPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } @@ -120,7 +136,10 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( By("removing the old primary pod and its pvc", func() { // Check if walStorage is enabled - walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env) + walStorageEnabled, err := storage.IsWalStorageEnabled( + env.Ctx, env.Client, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) // Force delete setting @@ -154,7 +173,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( } // Deleting old primary pod - err = env.DeletePod(namespace, oldPrimaryPodName, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, oldPrimaryPodName, quickDelete) Expect(err).ToNot(HaveOccurred()) // checking that the old primary pod is eventually gone @@ -187,8 +206,8 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( }, 300).Should(BeTrue()) }) AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) - AssertDataExpectedCount(namespace, clusterName, tableName, 2, psqlClientPod) - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertDataExpectedCount(env, tableLocator, 2) 
+ AssertClusterStandbysAreStreaming(namespace, clusterName, 140) } Context("plain cluster", func() { diff --git a/tests/e2e/pg_wal_volume_test.go b/tests/e2e/pg_wal_volume_test.go index 612f936b59..164ee134b8 100644 --- a/tests/e2e/pg_wal_volume_test.go +++ b/tests/e2e/pg_wal_volume_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -30,7 +33,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -46,7 +51,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { ) var namespace string verifyPgWal := func(namespace string) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items), err).To(BeEquivalentTo(3)) By("checking that pg_wal PVC has been created", func() { for _, pod := range podList.Items { @@ -77,8 +82,9 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { ".*[0-9]$") timeout := 300 Eventually(func() (int, error, error) { - out, _, err := env.ExecCommandInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: pod.GetName(), }, nil, @@ -93,7 +99,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { // Inline function to patch walStorage in existing cluster updateWalStorage := func(namespace, clusterName string) { err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) WalStorageClass := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") cluster.Spec.WalStorage = &apiv1.StorageConfiguration{ @@ -118,7 +124,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { const namespacePrefix = "pg-wal-volume-e2e" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithPgWal, env) verifyPgWal(namespace) @@ -128,7 +134,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { const namespacePrefix = "add-pg-wal-volume-e2e" var err error // Create a cluster in a namespace we'll delete after 
the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithoutPgWal, env) By(fmt.Sprintf("adding pg_wal volume in existing cluster: %v", clusterName), func() { @@ -137,7 +143,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { AssertPVCCount(namespace, clusterName, expectedPvcCount, 120) AssertClusterEventuallyReachesPhase(namespace, clusterName, []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 30) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) AssertClusterPhaseIsConsistent(namespace, clusterName, []string{apiv1.PhaseHealthy}, 30) verifyPgWal(namespace) }) diff --git a/tests/e2e/pgbouncer_metrics_test.go b/tests/e2e/pgbouncer_metrics_test.go index a33e6a7285..bb400afced 100644 --- a/tests/e2e/pgbouncer_metrics_test.go +++ b/tests/e2e/pgbouncer_metrics_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -26,7 +29,8 @@ import ( pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -49,16 +53,16 @@ var _ = Describe("PGBouncer Metrics", Label(tests.LabelObservability), func() { It("should retrieve the metrics exposed by a freshly created pooler of type pgBouncer and validate its content", func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(cnpgCluster) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, cnpgCluster) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, cnpgCluster, env) createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerBasicAuthRWSampleFile, 1) - poolerName, err := env.GetResourceNameFromYAML(poolerBasicAuthRWSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerBasicAuthRWSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -96,7 +100,7 @@ var _ = Describe("PGBouncer Metrics", Label(tests.LabelObservability), func() { for _, pod := range podList.Items { podName := pod.GetName() - out, err := utils.RetrieveMetricsFromPgBouncer(env, pod) + out, err := proxy.RetrieveMetricsFromPgBouncer(env.Ctx, env.Interface, pod) Expect(err).ToNot(HaveOccurred()) matches := metricsRegexp.FindAllString(out, -1) Expect(matches).To( diff --git a/tests/e2e/pgbouncer_test.go b/tests/e2e/pgbouncer_test.go index 7bb591540f..6631e15124 100644 --- a/tests/e2e/pgbouncer_test.go +++ b/tests/e2e/pgbouncer_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,21 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( + corev1 "k8s.io/api/core/v1" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -43,14 +53,16 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), Context("no user-defined certificates", Ordered, func() { BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace("pgbouncer-auth-no-user-certs") + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-auth-no-user-certs") Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) JustAfterEach(func() { - DeleteTableUsingPgBouncerService(namespace, clusterName, poolerBasicAuthRWSampleFile, env, psqlClientPod) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + DeleteTableUsingPgBouncerService(namespace, clusterName, poolerBasicAuthRWSampleFile, env, primaryPod) }) It("can connect to Postgres via pgbouncer service using basic authentication", func() { @@ -73,6 +85,14 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), assertReadWriteConnectionUsingPgBouncerService(namespace, clusterName, poolerBasicAuthROSampleFile, false) }) + + By("executing psql within the pgbouncer pod", func() { + pod, err := getPgbouncerPod(namespace, poolerBasicAuthRWSampleFile) + Expect(err).ToNot(HaveOccurred()) + + err = runShowHelpInPod(pod) + Expect(err).ToNot(HaveOccurred()) + }) }) It("can connect to Postgres via pgbouncer service using tls certificates", func() { @@ -139,9 +159,9 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), caSecNameClient = "my-postgresql-client-ca" ) // Create a cluster in a namespace that will be deleted after the test - namespace, err = env.CreateUniqueTestNamespace("pgbouncer-separate-certificates") + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-separate-certificates") Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(sampleFileWithCertificate) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithCertificate) Expect(err).ToNot(HaveOccurred()) // Create certificates secret for server @@ -174,3 +194,27 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), }) }) }) + +func getPgbouncerPod(namespace, sampleFile string) (*corev1.Pod, error) { + poolerKey, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) + if err != nil { + return nil, err + } + + Expect(err).ToNot(HaveOccurred()) + + var podList corev1.PodList + err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(namespace), + ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerKey}) + Expect(err).ToNot(HaveOccurred()) + Expect(len(podList.Items)).Should(BeEquivalentTo(1)) + return &podList.Items[0], nil +} + +func runShowHelpInPod(pod *corev1.Pod) error { + _, _, err := exec.Command( + env.Ctx, env.Interface, env.RestClientConfig, *pod, + "pgbouncer", nil, "psql", "-c", "SHOW HELP", + ) + return err +} diff --git a/tests/e2e/pgbouncer_types_test.go b/tests/e2e/pgbouncer_types_test.go index dbdfd51325..59bf929f79 100644 --- a/tests/e2e/pgbouncer_types_test.go +++ b/tests/e2e/pgbouncer_types_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as 
+CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -24,7 +27,8 @@ import ( pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -53,9 +57,9 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test // This cluster will be shared by the next tests - namespace, err = env.CreateUniqueTestNamespace("pgbouncer-types") + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-types") Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) @@ -74,7 +78,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit }) By("verify that read-only pooler pgbouncer.ini contains the correct host service", func() { - poolerName, err := env.GetResourceNameFromYAML(poolerCertificateROSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateROSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -89,7 +93,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit }) By("verify that read-write pooler pgbouncer.ini contains the correct host service", func() { - poolerName, err := env.GetResourceNameFromYAML(poolerCertificateRWSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateRWSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -105,7 +109,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit By(fmt.Sprintf("scaling PGBouncer to %v instances", instances), func() { command := fmt.Sprintf("kubectl scale pooler %s -n %s --replicas=%v", poolerResourceNameRO, namespace, instances) - _, _, err := utils.Run(command) + _, _, err := run.Run(command) Expect(err).ToNot(HaveOccurred()) // verifying if PGBouncer pooler pods are ready after scale up @@ -114,7 +118,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit // // scale up command for 3 replicas for read write command = fmt.Sprintf("kubectl scale pooler %s -n %s --replicas=%v", poolerResourceNameRW, namespace, instances) - _, _, err = utils.Run(command) + _, _, err = run.Run(command) Expect(err).ToNot(HaveOccurred()) // verifying if PGBouncer pooler pods are ready after scale up @@ -126,7 +130,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit }) By("verifying that read-only pooler 
pgbouncer.ini contains the correct host service", func() { - poolerName, err := env.GetResourceNameFromYAML(poolerCertificateROSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateROSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -141,7 +145,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit }) By("verifying that read-write pooler pgbouncer.ini contains the correct host service", func() { - poolerName, err := env.GetResourceNameFromYAML(poolerCertificateRWSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateRWSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), diff --git a/tests/e2e/pod_patch_test.go b/tests/e2e/pod_patch_test.go new file mode 100644 index 0000000000..f34a50bb38 --- /dev/null +++ b/tests/e2e/pod_patch_test.go @@ -0,0 +1,104 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Pod patch", Label(tests.LabelSmoke, tests.LabelBasic), func() { + const ( + sampleFile = fixturesDir + "/base/cluster-storage-class.yaml.template" + clusterName = "postgresql-storage-class" + level = tests.Lowest + ) + + var namespace string + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + It("use the podPatch annotation to generate Pods", func(_ SpecContext) { + const namespacePrefix = "cluster-patch-e2e" + var err error + + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + AssertCreateCluster(namespace, clusterName, sampleFile, env) + + By("adding the podPatch annotation", func() { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + patchedCluster := cluster.DeepCopy() + + patchedCluster.SetAnnotations(map[string]string{ + utils.PodPatchAnnotationName: ` + [ + { + "op": "add", + "path": "/metadata/annotations/e2e.cnpg.io", + "value": "this-test" + } + ] + `, + }) + err = env.Client.Patch(env.Ctx, patchedCluster, client.MergeFrom(cluster)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("deleting all the Pods", func() { + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + for i := range podList.Items { + err := env.Client.Delete(env.Ctx, &podList.Items[i]) + Expect(err).ToNot(HaveOccurred()) + } + }) + + By("waiting for the new annotation to be applied to the new Pods", func() { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + timeout := 120 + Eventually(func(g Gomega) { + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(podList.Items).To(HaveLen(cluster.Spec.Instances)) + + for _, pod := range podList.Items { + g.Expect(pod.Annotations).To(HaveKeyWithValue("e2e.cnpg.io", "this-test")) + } + }, timeout).Should(Succeed()) + }) + }) +}) diff --git a/tests/e2e/probes_test.go b/tests/e2e/probes_test.go new file mode 100644 index 0000000000..6d15855da2 --- /dev/null +++ b/tests/e2e/probes_test.go @@ -0,0 +1,199 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// Set of tests in which we check that the configuration of the readiness probes is applied +var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() { + const ( + level = tests.High + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + It("can change the probes configuration", func(ctx SpecContext) { + var namespace string + + const sampleFile = fixturesDir + "/base/cluster-storage-class.yaml.template" + const clusterName = "postgresql-storage-class" + + // IMPORTANT: for this E2e to work, these values need to be different + // than the default Kubernetes settings + probeConfiguration := apiv1.Probe{ + InitialDelaySeconds: 2, + PeriodSeconds: 4, + TimeoutSeconds: 8, + } + probesConfiguration := apiv1.ProbesConfiguration{ + Startup: &apiv1.ProbeWithStrategy{ + Probe: probeConfiguration, + }, + Liveness: &apiv1.LivenessProbe{ + Probe: probeConfiguration, + }, + Readiness: &apiv1.ProbeWithStrategy{ + Probe: probeConfiguration, + }, + } + + assertProbeCoherentWithConfiguration := func(probe *corev1.Probe) { + Expect(probe.InitialDelaySeconds).To(BeEquivalentTo(probeConfiguration.InitialDelaySeconds)) + Expect(probe.PeriodSeconds).To(BeEquivalentTo(probeConfiguration.PeriodSeconds)) + Expect(probe.TimeoutSeconds).To(BeEquivalentTo(probeConfiguration.TimeoutSeconds)) + } + + assertProbesCoherentWithConfiguration := func(container *corev1.Container) { + assertProbeCoherentWithConfiguration(container.LivenessProbe) + assertProbeCoherentWithConfiguration(container.ReadinessProbe) + assertProbeCoherentWithConfiguration(container.StartupProbe) + } + + var defaultReadinessProbe *corev1.Probe + var defaultLivenessProbe *corev1.Probe + var defaultStartupProbe *corev1.Probe + + By("creating an empty cluster", func() { + // Create a cluster in a namespace we'll delete after the test + const namespacePrefix = "probes" + var err error + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + AssertCreateCluster(namespace, clusterName, sampleFile, env) + }) + + By("getting the default probes configuration", func() { + var pod corev1.Pod + err := env.Client.Get(ctx, client.ObjectKey{ + Name: fmt.Sprintf("%s-1", clusterName), + Namespace: namespace, + }, &pod) + Expect(err).ToNot(HaveOccurred()) + + Expect(pod.Spec.Containers[0].Name).To(Equal("postgres")) + defaultReadinessProbe = pod.Spec.Containers[0].ReadinessProbe.DeepCopy() + defaultLivenessProbe = pod.Spec.Containers[0].LivenessProbe.DeepCopy() + defaultStartupProbe = pod.Spec.Containers[0].StartupProbe.DeepCopy() + }) + + By("applying a probe configuration", func() { + var cluster apiv1.Cluster + err := env.Client.Get(ctx, client.ObjectKey{ + Name: clusterName, + Namespace: namespace, + }, &cluster) + Expect(err).ToNot(HaveOccurred()) + + originalCluster := cluster.DeepCopy() + cluster.Spec.Probes = probesConfiguration.DeepCopy() + + err = env.Client.Patch(ctx, &cluster, client.MergeFrom(originalCluster)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("waiting for the cluster to restart", func() { + AssertClusterEventuallyReachesPhase(namespace, clusterName, + []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) + }) + + By("checking the applied settings", func() { + var cluster apiv1.Cluster + err := 
env.Client.Get(ctx, client.ObjectKey{ + Name: clusterName, + Namespace: namespace, + }, &cluster) + Expect(err).ToNot(HaveOccurred()) + + for _, instance := range cluster.Status.InstanceNames { + var pod corev1.Pod + err := env.Client.Get(ctx, client.ObjectKey{ + Name: instance, + Namespace: namespace, + }, &pod) + Expect(err).ToNot(HaveOccurred()) + + Expect(pod.Spec.Containers[0].Name).To(Equal("postgres")) + assertProbesCoherentWithConfiguration(&pod.Spec.Containers[0]) + } + }) + + By("reverting back the changes", func() { + var cluster apiv1.Cluster + err := env.Client.Get(ctx, client.ObjectKey{ + Name: clusterName, + Namespace: namespace, + }, &cluster) + Expect(err).ToNot(HaveOccurred()) + + originalCluster := cluster.DeepCopy() + cluster.Spec.Probes = &apiv1.ProbesConfiguration{} + + err = env.Client.Patch(ctx, &cluster, client.MergeFrom(originalCluster)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("waiting for the cluster to restart", func() { + AssertClusterEventuallyReachesPhase(namespace, clusterName, + []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) + }) + + By("checking the applied settings", func() { + var cluster apiv1.Cluster + err := env.Client.Get(ctx, client.ObjectKey{ + Name: clusterName, + Namespace: namespace, + }, &cluster) + Expect(err).ToNot(HaveOccurred()) + + for _, instance := range cluster.Status.InstanceNames { + var pod corev1.Pod + err = env.Client.Get(ctx, client.ObjectKey{ + Name: instance, + Namespace: namespace, + }, &pod) + Expect(err).ToNot(HaveOccurred()) + + Expect(pod.Spec.Containers[0].Name).To(Equal("postgres")) + Expect(pod.Spec.Containers[0].LivenessProbe).To(BeEquivalentTo(defaultLivenessProbe)) + Expect(pod.Spec.Containers[0].ReadinessProbe).To(BeEquivalentTo(defaultReadinessProbe)) + Expect(pod.Spec.Containers[0].StartupProbe).To(BeEquivalentTo(defaultStartupProbe)) + } + }) + }) +}) diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go new file mode 100644 index 0000000000..a3ea4166af --- /dev/null +++ b/tests/e2e/publication_subscription_test.go @@ -0,0 +1,361 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/types" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// - spinning up a cluster, apply a declarative publication/subscription on it + +// Set of tests in which we use the declarative publication and subscription CRDs on an existing cluster +var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSubscription), func() { + const ( + sourceClusterManifest = fixturesDir + "/declarative_pub_sub/source-cluster.yaml.template" + destinationClusterManifest = fixturesDir + "/declarative_pub_sub/destination-cluster.yaml.template" + sourceDatabaseManifest = fixturesDir + "/declarative_pub_sub/source-database.yaml" + destinationDatabaseManifest = fixturesDir + "/declarative_pub_sub/destination-database.yaml" + pubManifest = fixturesDir + "/declarative_pub_sub/pub.yaml" + subManifest = fixturesDir + "/declarative_pub_sub/sub.yaml" + level = tests.Medium + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + Context("in a plain vanilla cluster", Ordered, func() { + const ( + namespacePrefix = "declarative-pub-sub" + dbname = "declarative" + subName = "sub" + pubName = "pub" + tableName = "test" + ) + var ( + sourceClusterName, destinationClusterName, namespace string + err error + ) + + BeforeAll(func() { + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterManifest) + Expect(err).ToNot(HaveOccurred()) + + destinationClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, destinationClusterManifest) + Expect(err).ToNot(HaveOccurred()) + + By("setting up source cluster", func() { + AssertCreateCluster(namespace, sourceClusterName, sourceClusterManifest, env) + }) + + By("setting up destination cluster", func() { + AssertCreateCluster(namespace, destinationClusterName, destinationClusterManifest, env) + }) + }) + + AfterEach(func() { + // We want to reuse the same source and destination Cluster, so + // we need to drop each Postgres object that has been created. + // We need to make sure that publication/subscription have been removed before + // attempting to drop the database, otherwise the DROP DATABASE will fail because + // there's an active logical replication slot. 
+ destPrimaryPod, err := clusterutils.GetPrimary( + env.Ctx, env.Client, + namespace, destinationClusterName, + ) + Expect(err).ToNot(HaveOccurred()) + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: destPrimaryPod.Namespace, + PodName: destPrimaryPod.Name, + }, + dbname, + fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", subName), + RetryTimeout, + PollingTime, + ) + Expect(err).ToNot(HaveOccurred()) + + sourcePrimaryPod, err := clusterutils.GetPrimary( + env.Ctx, env.Client, + namespace, sourceClusterName, + ) + Expect(err).ToNot(HaveOccurred()) + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: sourcePrimaryPod.Namespace, + PodName: sourcePrimaryPod.Name, + }, + dbname, + fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pubName), + RetryTimeout, + PollingTime, + ) + Expect(err).ToNot(HaveOccurred()) + + Expect(DeleteResourcesFromFile(namespace, destinationDatabaseManifest)).To(Succeed()) + Expect(DeleteResourcesFromFile(namespace, sourceDatabaseManifest)).To(Succeed()) + Eventually(QueryMatchExpectationPredicate(sourcePrimaryPod, postgres.PostgresDBName, + databaseExistsQuery(dbname), "f"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(destPrimaryPod, postgres.PostgresDBName, + databaseExistsQuery(dbname), "f"), 30).Should(Succeed()) + }) + + assertCreateDatabase := func(namespace, clusterName, databaseManifest string) { + databaseObject := &apiv1.Database{} + databaseObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest) + Expect(err).NotTo(HaveOccurred()) + + By(fmt.Sprintf("applying the %s Database CRD manifest", databaseObjectName), func() { + CreateResourceFromFile(namespace, databaseManifest) + }) + + By(fmt.Sprintf("ensuring the %s Database CRD succeeded reconciliation", databaseObjectName), func() { + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseObjectName, + } + + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, databaseNamespacedName, databaseObject) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(databaseObject.Status.Applied).Should(HaveValue(BeTrue())) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By(fmt.Sprintf("verifying the %s database has been created", databaseObject.Spec.Name), func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName, + databaseExistsQuery(databaseObject.Spec.Name), "t"), 30).Should(Succeed()) + }) + } + + // nolint:dupl + assertCreatePublication := func(namespace, clusterName, publicationManifest string) { + pubObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, publicationManifest) + Expect(err).NotTo(HaveOccurred()) + + By("applying Publication CRD manifest", func() { + CreateResourceFromFile(namespace, publicationManifest) + }) + + By("ensuring the Publication CRD succeeded reconciliation", func() { + // get publication object + pubNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: pubObjectName, + } + + Eventually(func(g Gomega) { + pub := &apiv1.Publication{} + err := env.Client.Get(env.Ctx, pubNamespacedName, pub) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pub.Status.Applied).Should(HaveValue(BeTrue())) + }, 300).WithPolling(10 * 
time.Second).Should(Succeed()) + }) + + By("verifying the new publication has been created", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, + publicationExistsQuery(pubName), "t"), 30).Should(Succeed()) + }) + } + + // nolint:dupl + assertCreateSubscription := func(namespace, clusterName, subscriptionManifest string) { + subObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, subscriptionManifest) + Expect(err).NotTo(HaveOccurred()) + + By("applying Subscription CRD manifest", func() { + CreateResourceFromFile(namespace, subscriptionManifest) + }) + + By("ensuring the Subscription CRD succeeded reconciliation", func() { + // get subscription object + subNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: subObjectName, + } + + Eventually(func(g Gomega) { + sub := &apiv1.Subscription{} + err := env.Client.Get(env.Ctx, subNamespacedName, sub) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(sub.Status.Applied).Should(HaveValue(BeTrue())) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By("verifying the new subscription has been created", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, + subscriptionExistsQuery(subName), "t"), 30).Should(Succeed()) + }) + } + + assertTestPubSub := func(retainOnDeletion bool) { + assertCreateDatabase(namespace, sourceClusterName, sourceDatabaseManifest) + + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: sourceClusterName, + DatabaseName: dbname, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + assertCreateDatabase(namespace, destinationClusterName, destinationDatabaseManifest) + + By("creating an empty table inside the destination database", func() { + query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName) + _, err = postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, destinationClusterName, dbname, + apiv1.ApplicationUserSecretSuffix, query, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + assertCreatePublication(namespace, sourceClusterName, pubManifest) + assertCreateSubscription(namespace, destinationClusterName, subManifest) + + var ( + publication apiv1.Publication + subscription apiv1.Subscription + ) + By("setting the reclaimPolicy", func() { + publicationReclaimPolicy := apiv1.PublicationReclaimDelete + subscriptionReclaimPolicy := apiv1.SubscriptionReclaimDelete + if retainOnDeletion { + publicationReclaimPolicy = apiv1.PublicationReclaimRetain + subscriptionReclaimPolicy = apiv1.SubscriptionReclaimRetain + } + // Get the object names + pubObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, pubManifest) + Expect(err).NotTo(HaveOccurred()) + subObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, subManifest) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func(g Gomega) { + err = objects.Get( + env.Ctx, env.Client, + types.NamespacedName{Namespace: namespace, Name: pubObjectName}, + &publication, + ) + g.Expect(err).ToNot(HaveOccurred()) + publication.Spec.ReclaimPolicy = publicationReclaimPolicy + err = env.Client.Update(env.Ctx, &publication) + g.Expect(err).ToNot(HaveOccurred()) + + err = objects.Get( + env.Ctx, env.Client, 
types.NamespacedName{Namespace: namespace, Name: subObjectName}, + &subscription, + ) + g.Expect(err).ToNot(HaveOccurred()) + subscription.Spec.ReclaimPolicy = subscriptionReclaimPolicy + err = env.Client.Update(env.Ctx, &subscription) + g.Expect(err).ToNot(HaveOccurred()) + }, 60, 5).Should(Succeed()) + }) + + By("checking that the data is present inside the destination cluster database", func() { + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: destinationClusterName, + DatabaseName: dbname, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) + }) + + By("removing the objects", func() { + Expect(objects.Delete(env.Ctx, env.Client, &publication)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, &subscription)).To(Succeed()) + }) + + By("verifying the publication reclaim policy outcome", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, sourceClusterName) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, + publicationExistsQuery(pubName), boolPGOutput(retainOnDeletion)), 30).Should(Succeed()) + }) + + By("verifying the subscription reclaim policy outcome", func() { + primaryPodInfo, err := clusterutils.GetPrimary( + env.Ctx, env.Client, + namespace, destinationClusterName, + ) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, + subscriptionExistsQuery(subName), boolPGOutput(retainOnDeletion)), 30).Should(Succeed()) + }) + } + + When("Reclaim policy is set to delete", func() { + It("can manage Publication and Subscription and delete them in Postgres", func() { + assertTestPubSub(false) + }) + }) + + When("Reclaim policy is set to retain", func() { + It("can manage Publication and Subscription and release it", func() { + assertTestPubSub(true) + }) + }) + }) +}) + +func publicationExistsQuery(pubName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_publication WHERE pubname='%s')", pubName) +} + +func subscriptionExistsQuery(subName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_subscription WHERE subname='%s')", subName) +} diff --git a/tests/e2e/pvc_deletion_test.go b/tests/e2e/pvc_deletion_test.go index ac5d7032bc..8c6ac17099 100644 --- a/tests/e2e/pvc_deletion_test.go +++ b/tests/e2e/pvc_deletion_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -25,7 +28,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -48,7 +52,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() { It("correctly manages PVCs", func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -79,7 +83,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() { quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(namespace, podName, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, podName, quickDelete) Expect(err).ToNot(HaveOccurred()) // The pod should be back @@ -122,7 +126,10 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() { originalPVCUID := pvc.GetUID() // Check if walStorage is enabled - walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env) + walStorageEnabled, err := storage.IsWalStorageEnabled( + env.Ctx, env.Client, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) // Force delete setting @@ -149,7 +156,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() { } // Deleting primary pod - err = env.DeletePod(namespace, podName, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, podName, quickDelete) Expect(err).ToNot(HaveOccurred()) // A new pod should be created diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index 5b669b8347..03eaba9040 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -33,10 +36,17 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -75,7 +85,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { testTableName = "replica_mode_tls_auth" ) - replicaNamespace, err := env.CreateUniqueTestNamespace(replicaNamespacePrefix) + replicaNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(replicaNamespace, srcClusterName, srcClusterSample, env) @@ -85,9 +95,9 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSampleTLS, testTableName, - psqlClientPod) + ) - replicaName, err := env.GetResourceNameFromYAML(replicaClusterSampleTLS) + replicaName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleTLS) Expect(err).ToNot(HaveOccurred()) assertReplicaClusterTopology(replicaNamespace, replicaName) @@ -108,9 +118,9 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { testTableName = "replica_mode_basic_auth" ) - replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSampleBasicAuth) + replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleBasicAuth) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(replicaNamespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, srcClusterName, srcClusterSample, env) @@ -120,7 +130,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSampleBasicAuth, testTableName, - psqlClientPod) + ) AssertDetachReplicaModeCluster( namespace, @@ -153,7 +163,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { return nil } - namespace, err = env.CreateUniqueTestNamespace("replica-promotion-demotion") + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "replica-promotion-demotion") Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterOneName, clusterOneFile, env) @@ -163,71 +173,103 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, clusterTwoFile, testTableName, - psqlClientPod) + ) // turn the src cluster into a replica By("setting replica mode on the src cluster", func() { - cluster, err := env.GetCluster(namespace, clusterOneName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterOneName) Expect(err).ToNot(HaveOccurred()) updateTime := time.Now().Truncate(time.Second) cluster.Spec.ReplicaCluster.Enabled = ptr.To(true) err = env.Client.Update(ctx, cluster) Expect(err).ToNot(HaveOccurred()) Eventually(func(g Gomega) { - cluster, err := env.GetCluster(namespace, clusterOneName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterOneName) g.Expect(err).ToNot(HaveOccurred()) condition := getReplicaClusterSwitchCondition(cluster.Status.Conditions) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) g.Expect(condition.LastTransitionTime.Time).To(BeTemporally(">=", updateTime)) }).WithTimeout(30 * time.Second).Should(Succeed()) - AssertClusterIsReady(namespace, clusterOneName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterOneName, testTimeouts[timeouts.ClusterIsReady], env) }) By("checking that src cluster is now a replica cluster", func() { Eventually(func() error { - clusterOnePrimary, err = 
env.GetClusterPrimary(namespace, clusterOneName) + clusterOnePrimary, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterOneName) return err - }, 30, 3).Should(BeNil()) + }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(clusterOnePrimary, true) }) // turn the dst cluster into a primary By("disabling the replica mode on the dst cluster", func() { - cluster, err := env.GetCluster(namespace, clusterTwoName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterTwoName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.ReplicaCluster.Enabled = ptr.To(false) err = env.Client.Update(ctx, cluster) Expect(err).ToNot(HaveOccurred()) - AssertClusterIsReady(namespace, clusterTwoName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterTwoName, testTimeouts[timeouts.ClusterIsReady], env) }) By("checking that dst cluster has been promoted", func() { Eventually(func() error { - clusterTwoPrimary, err = env.GetClusterPrimary(namespace, clusterTwoName) + clusterTwoPrimary, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterTwoName) return err - }, 30, 3).Should(BeNil()) + }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(clusterTwoPrimary, false) }) By("creating a new data in the new source cluster", func() { - AssertCreateTestDataWithDatabaseName(namespace, clusterTwoName, sourceDBName, - "new_test_table", clusterTwoPrimary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterTwoName, + DatabaseName: sourceDBName, + TableName: "new_test_table", + } + Eventually(func() error { + _, err := postgres.RunExecOverForward(ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, clusterTwoName, sourceDBName, + apiv1.ApplicationUserSecretSuffix, + "SELECT 1;", + ) + return err + }, testTimeouts[timeouts.Short]).Should(Succeed()) + AssertCreateTestData(env, tableLocator) + }) + + // The dst Cluster gets promoted to primary, hence the new appUser password will + // be updated to reflect its "-app" secret. + // We need to copy the password changes over to the src Cluster, which is now a Replica + // Cluster, in order to connect using the "-app" secret. 
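+ // Illustrative only (the secret names below are assumptions, not taken + // from this test's fixtures): the equivalent manual fix with kubectl is + // to read the base64-encoded password from the promoted cluster's "-app" + // secret and patch it into the demoted cluster's secret verbatim: + // PW=$(kubectl get secret cluster-two-app -n "$NS" -o jsonpath='{.data.password}') + // kubectl patch secret cluster-one-app -n "$NS" --type merge -p "{\"data\":{\"password\":\"$PW\"}}"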
+ By("updating the appUser secret of the src cluster", func() { + _, appSecretPassword, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterTwoName, namespace, + apiv1.ApplicationUserSecretSuffix) + Expect(err).ToNot(HaveOccurred()) + AssertUpdateSecret("password", appSecretPassword, clusterOneName+apiv1.ApplicationUserSecretSuffix, + namespace, clusterOneName, 30, env) }) By("checking that the data is present in the old src cluster", func() { - AssertDataExpectedCountWithDatabaseName( - namespace, - clusterOnePrimary.Name, - sourceDBName, - "new_test_table", - 2, - ) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterOneName, + DatabaseName: sourceDBName, + TableName: "new_test_table", + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) - Context("archive mode set to 'always' on designated primary", func() { + Context("archive mode set to 'always' on designated primary", Label(tests.LabelBackupRestore), func() { It("verifies replica cluster can archive WALs from the designated primary", func() { const ( replicaClusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-archive-mode-always.yaml.template" @@ -235,12 +277,21 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { testTableName = "replica_mode_archive" ) - replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample) + replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSample) Expect(err).ToNot(HaveOccurred()) - replicaNamespace, err := env.CreateUniqueTestNamespace(replicaNamespacePrefix) + replicaNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix) Expect(err).ToNot(HaveOccurred()) + By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(replicaNamespace, "backup-storage-creds", "minio", "minio123") + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + replicaNamespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -256,19 +307,28 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSample, testTableName, - psqlClientPod) + ) // Get primary from replica cluster - primaryReplicaCluster, err := env.GetClusterPrimary(replicaNamespace, replicaClusterName) + primaryReplicaCluster, err := clusterutils.GetPrimary( + env.Ctx, + env.Client, + replicaNamespace, + replicaClusterName, + ) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 - By("verify archive mode is set to 'always on' designated primary", func() { query := "show archive_mode;" Eventually(func() (string, error) { - stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", sourceDBName, "-tAc", query) + stdOut, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, + sourceDBName, + query) return strings.Trim(stdOut, "\n"), err }, 30).Should(BeEquivalentTo("always")) }) @@ -280,7 +340,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { }) }) - Context("can bootstrap a replica cluster from a backup", Ordered, func() { + Context("can bootstrap a replica cluster from a backup", Label(tests.LabelBackupRestore), Ordered, func() { const ( 
clusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-src-with-backup.yaml.template" namespacePrefix = "replica-cluster-from-backup" @@ -290,11 +350,19 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { BeforeAll(func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -303,7 +371,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { }) // Create the cluster - clusterName, err = env.GetResourceNameFromYAML(clusterSample) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSample) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterSample, env) }) @@ -316,13 +384,15 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { By("creating a backup and waiting until it's completed", func() { backupName := fmt.Sprintf("%v-backup", clusterName) - backup, err := testUtils.CreateOnDemandBackup( + backup, err := backups.CreateOnDemand( + env.Ctx, + env.Client, namespace, clusterName, backupName, apiv1.BackupTargetStandby, apiv1.BackupMethodBarmanObjectStore, - env) + ) Expect(err).ToNot(HaveOccurred()) Eventually(func() (apiv1.BackupPhase, error) { @@ -331,7 +401,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Name: backupName, }, backup) return backup.Status.Phase, err - }, testTimeouts[testUtils.BackupIsReady]).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted)) + }, testTimeouts[timeouts.BackupIsReady]).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted)) }) By("creating a replica cluster from the backup", func() { @@ -341,7 +411,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSample, testTableName, - psqlClientPod) + ) }) }) @@ -369,13 +439,15 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { By("creating a snapshot and waiting until it's completed", func() { var err error snapshotName := fmt.Sprintf("%v-snapshot", clusterName) - backup, err = testUtils.CreateOnDemandBackup( + backup, err = backups.CreateOnDemand( + env.Ctx, + env.Client, namespace, clusterName, snapshotName, apiv1.BackupTargetStandby, apiv1.BackupMethodVolumeSnapshot, - env) + ) Expect(err).ToNot(HaveOccurred()) Eventually(func(g Gomega) { @@ -386,7 +458,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { g.Expect(err).ToNot(HaveOccurred()) g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted)) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("fetching the volume snapshots", func() { @@ -397,11 +469,11 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Expect(err).ToNot(HaveOccurred()) Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := 
storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) Expect(err).ToNot(HaveOccurred()) }) @@ -412,7 +484,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSample, testTableName, - psqlClientPod) + ) }) }) }) @@ -420,7 +492,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { // In this test we create a replica cluster from a backup and then promote it to a primary. // We expect the original primary to be demoted to a replica and be able to follow the new primary. -var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, func() { +var _ = Describe("Replica switchover", Label(tests.LabelReplication, tests.LabelBackupRestore), Ordered, func() { const ( replicaSwitchoverClusterDir = "/replica_mode_cluster/" namespacePrefix = "replica-switchover" @@ -444,11 +516,12 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }) validateReplication := func(namespace, clusterAName, clusterBName string) { - primary, err := env.GetClusterPrimary(namespace, clusterBName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) - _, _, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: primary.Name}, + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", "CREATE TABLE test_replication AS SELECT 1;", ) @@ -456,14 +529,15 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f _ = switchWalAndGetLatestArchive(namespace, primary.Name) Eventually(func(g Gomega) { - podListA, err := env.GetClusterPodList(namespace, clusterAName) + podListA, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterAName) g.Expect(err).ToNot(HaveOccurred()) - podListB, err := env.GetClusterPodList(namespace, clusterBName) + podListB, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterBName) g.Expect(err).ToNot(HaveOccurred()) for _, podA := range podListA.Items { - _, _, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: podA.Name}, + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: podA.Name}, "postgres", "SELECT * FROM test_replication;", ) @@ -471,43 +545,45 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f } for _, podB := range podListB.Items { - _, _, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: podB.Name}, + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: podB.Name}, "postgres", "SELECT * FROM test_replication;", ) g.Expect(err).ToNot(HaveOccurred()) } - }, testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed()) + }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed()) } waitForTimelineIncrease := func(namespace, clusterName string, expectedTimeline int) bool { return Eventually(func(g Gomega) { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, 
namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: primary.Name}, + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", - "SELECT timeline_id FROM pg_control_checkpoint();", + "SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()", ) g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.TrimSpace(stdout)).To(Equal(fmt.Sprintf("%d", expectedTimeline))) - }, testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed()) + }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed()) } DescribeTable("should demote and promote the clusters correctly", func(clusterAFile string, clusterBFile string, expectedTimeline int) { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) DeferCleanup(func() error { // Since we use multiple times the same cluster names for the same minio instance, we need to clean it up // between tests - _, err = testUtils.CleanFilesOnMinio(minioEnv, path.Join("minio", "cluster-backups", clusterAName)) + _, err = minio.CleanFiles(minioEnv, path.Join("minio", "cluster-backups", clusterAName)) if err != nil { return err } - _, err = testUtils.CleanFilesOnMinio(minioEnv, path.Join("minio", "cluster-backups", clusterBName)) + _, err = minio.CleanFiles(minioEnv, path.Join("minio", "cluster-backups", clusterBName)) if err != nil { return err } @@ -518,7 +594,15 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f DeferCleanup(func() { close(stopLoad) }) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -528,15 +612,16 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f By("creating the A cluster", func() { var err error - clusterAName, err = env.GetResourceNameFromYAML(clusterAFile) + clusterAName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterAFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterAName, clusterAFile, env) }) By("creating some load on the A cluster", func() { - primary, err := env.GetClusterPrimary(namespace, clusterAName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) - _, _, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: primary.Name}, + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", "CREATE TABLE switchover_load (i int);", ) @@ -544,8 +629,9 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f go func() { for { - _, _, _ = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: primary.Name}, + _, _, _ = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", "INSERT 
INTO switchover_load SELECT generate_series(1, 10000)", ) @@ -561,7 +647,8 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }) By("backing up the A cluster", func() { - backup, err := testUtils.CreateBackup( + backup, err := backups.Create( + env.Ctx, env.Client, apiv1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -573,12 +660,11 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f Cluster: apiv1.LocalObjectReference{Name: clusterAName}, }, }, - env, ) Expect(err).ToNot(HaveOccurred()) // Speed up backup finalization - primary, err := env.GetClusterPrimary(namespace, clusterAName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) _ = switchWalAndGetLatestArchive(namespace, primary.Name) Expect(err).ToNot(HaveOccurred()) @@ -591,25 +677,25 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }, backup) return backup.Status.Phase, err }, - testTimeouts[testUtils.BackupIsReady], + testTimeouts[timeouts.BackupIsReady], ).WithPolling(10 * time.Second). Should(BeEquivalentTo(apiv1.BackupPhaseCompleted)) }) By("creating the B cluster from the backup", func() { var err error - clusterBName, err = env.GetResourceNameFromYAML(clusterBFile) + clusterBName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterBFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterBName, clusterBFile, env) }) By("demoting A to a replica", func() { - cluster, err := env.GetCluster(namespace, clusterAName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) oldCluster := cluster.DeepCopy() cluster.Spec.ReplicaCluster.Primary = clusterBName Expect(env.Client.Patch(env.Ctx, cluster, k8client.MergeFrom(oldCluster))).To(Succeed()) - podList, err := env.GetClusterPodList(namespace, clusterAName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { AssertPgRecoveryMode(&pod, true) @@ -618,7 +704,7 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f var token, invalidToken string By("getting the demotion token", func() { - cluster, err := env.GetCluster(namespace, clusterAName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) token = cluster.Status.DemotionToken }) @@ -633,7 +719,7 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }) By("promoting B with the invalid token", func() { - cluster, err := env.GetCluster(namespace, clusterBName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) oldCluster := cluster.DeepCopy() @@ -644,20 +730,26 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f By("failing to promote B with the invalid token", func() { Consistently(func(g Gomega) { - pod, err := env.GetClusterPrimary(namespace, clusterBName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName) g.Expect(err).ToNot(HaveOccurred()) - stdOut, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, ptr.To(time.Second*10), - "psql", "-U", "postgres", "postgres", "-tAc", "select pg_is_in_recovery();") + stdOut, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, 
env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "select pg_catalog.pg_is_in_recovery()") g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.Trim(stdOut, "\n")).To(Equal("t")) }, 60, 10).Should(Succeed()) - cluster, err := env.GetCluster(namespace, clusterBName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.Status.Phase).To(BeEquivalentTo(apiv1.PhaseUnrecoverable)) }) By("promoting B with the right token", func() { - cluster, err := env.GetCluster(namespace, clusterBName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) oldCluster := cluster.DeepCopy() cluster.Spec.ReplicaCluster.PromotionToken = token @@ -670,10 +762,10 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }) By("verifying B contains the primary", func() { - primary, err := env.GetClusterPrimary(namespace, clusterBName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) AssertPgRecoveryMode(primary, false) - podList, err := env.GetClusterReplicas(namespace, clusterBName) + podList, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { AssertPgRecoveryMode(&pod, true) @@ -701,7 +793,7 @@ func assertReplicaClusterTopology(namespace, clusterName string) { standbys []string ) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.Status.ReadyInstances).To(BeEquivalentTo(cluster.Spec.Instances)) @@ -713,14 +805,15 @@ func assertReplicaClusterTopology(namespace, clusterName string) { standbys = funk.FilterString(cluster.Status.InstanceNames, func(name string) bool { return name != primary }) getStreamingInfo := func(podName string) ([]string, error) { - stdout, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + stdout, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: podName, }, &commandTimeout, "psql", "-U", "postgres", "-tAc", - "select string_agg(application_name, ',') from pg_stat_replication;", + "select string_agg(application_name, ',') from pg_catalog.pg_stat_replication;", ) if err != nil { return nil, err @@ -757,14 +850,15 @@ func assertReplicaClusterTopology(namespace, clusterName string) { By("verifying that the new primary is streaming from the source cluster", func() { Eventually(func(g Gomega) { - stdout, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + stdout, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary, }, &commandTimeout, "psql", "-U", "postgres", "-tAc", - "select sender_host from pg_stat_wal_receiver limit 1;", + "select sender_host from pg_catalog.pg_stat_wal_receiver limit 1", ) g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.TrimSpace(stdout)).To(BeEquivalentTo(sourceHost)) diff --git a/tests/e2e/replication_slot_test.go b/tests/e2e/replication_slot_test.go index 4a576de7bb..9db01a5c60 100644 --- a/tests/e2e/replication_slot_test.go +++ b/tests/e2e/replication_slot_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG 
Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -24,7 +27,10 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/replicationslot" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -47,17 +53,19 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { It("Can enable and disable replication slots", func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) By("enabling replication slot on cluster", func() { - err := testsUtils.ToggleHAReplicationSlots(namespace, clusterName, true, env) + err := replicationslot.ToggleHAReplicationSlots( + env.Ctx, env.Client, + namespace, clusterName, true) Expect(err).ToNot(HaveOccurred()) // Replication slots should be Enabled Consistently(func() (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return false, err } @@ -65,21 +73,14 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { }, 10, 2).Should(BeTrue()) }) - if env.PostgresVersion == 11 { - // We need to take into account the fact that on PostgreSQL 11 - // it is required to rolling restart the cluster to - // enable or disable the feature once the cluster is created. 
- AssertClusterRollingRestart(namespace, clusterName) - } - By("checking Primary HA slots exist and are active", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod( + expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod( + env.Ctx, env.Client, namespace, clusterName, primaryPod.GetName(), - env, ) Expect(err).ToNot(HaveOccurred()) AssertReplicationSlotsOnPod(namespace, clusterName, *primaryPod, expectedSlots, true, false) @@ -90,12 +91,15 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { var err error before := time.Now() Eventually(func(g Gomega) { - replicaPods, err = env.GetClusterReplicas(namespace, clusterName) + replicaPods, err = clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName) g.Expect(len(replicaPods.Items), err).To(BeEquivalentTo(2)) }, 90, 2).Should(Succeed()) GinkgoWriter.Println("standby slot check succeeded in", time.Since(before)) for _, pod := range replicaPods.Items { - expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env) + expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod( + env.Ctx, env.Client, + namespace, clusterName, pod.GetName(), + ) Expect(err).ToNot(HaveOccurred()) AssertReplicationSlotsOnPod(namespace, clusterName, pod, expectedSlots, true, false) } @@ -106,13 +110,18 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { }) By("creating a physical replication slots on the primary", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.RunQueryFromPod(primaryPod, testsUtils.PGLocalSocketDir, - "app", "postgres", "''", - fmt.Sprintf("SELECT pg_create_physical_replication_slot('%s');", userPhysicalSlot), - env) + query := fmt.Sprintf("SELECT pg_catalog.pg_create_physical_replication_slot('%s')", userPhysicalSlot) + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.PostgresDBName, + query) Expect(err).ToNot(HaveOccurred()) }) @@ -121,7 +130,7 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { var err error before := time.Now() Eventually(func(g Gomega) { - replicaPods, err = env.GetClusterReplicas(namespace, clusterName) + replicaPods, err = clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName) g.Expect(len(replicaPods.Items), err).To(BeEquivalentTo(2)) }, 90, 2).Should(Succeed()) GinkgoWriter.Println("standby slot check succeeded in", time.Since(before)) @@ -132,14 +141,18 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { }) By("disabling replication slot from running cluster", func() { - err := testsUtils.ToggleHAReplicationSlots(namespace, clusterName, false, env) + err := replicationslot.ToggleHAReplicationSlots( + env.Ctx, env.Client, + namespace, clusterName, false) Expect(err).ToNot(HaveOccurred()) - err = testsUtils.ToggleSynchronizeReplicationSlots(namespace, clusterName, false, env) + err = replicationslot.ToggleSynchronizeReplicationSlots( + env.Ctx, env.Client, + namespace, 
clusterName, false) Expect(err).ToNot(HaveOccurred()) // Replication slots should be Disabled Consistently(func() (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return false, err } @@ -147,19 +160,14 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { }, 10, 2).Should(BeFalse()) }) - if env.PostgresVersion == 11 { - // We need to take into account the fact that on PostgreSQL 11 - // it is required to rolling restart the cluster to - // enable or disable the feature once the cluster is created. - AssertClusterRollingRestart(namespace, clusterName) - } - By("verifying slots have been removed from the cluster's pods", func() { - pods, err := env.GetClusterPodList(namespace, clusterName) + pods, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range pods.Items { Eventually(func(g Gomega) error { - slotOnPod, err := testsUtils.GetReplicationSlotsOnPod(namespace, pod.GetName(), env) + slotOnPod, err := replicationslot.GetReplicationSlotsOnPod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, pod.GetName(), postgres.AppDBName) if err != nil { return err } diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go index 9c61427c56..02370589e1 100644 --- a/tests/e2e/rolling_update_test.go +++ b/tests/e2e/rolling_update_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -19,6 +22,8 @@ package e2e import ( "os" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -27,11 +32,14 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -44,12 +52,12 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Skip("Test depth is lower than the amount requested for this test") } }) - // gatherClusterInfo returns the current lists of pods, pod UIDs and pvc UIDs in a given cluster + // gatherClusterInfo returns the current lists of podutils, pod UIDs and pvc UIDs in a given cluster gatherClusterInfo := func(namespace string, clusterName string) ([]string, []types.UID, []types.UID, error) { var podNames []string var podUIDs []types.UID var pvcUIDs []types.UID - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podNames = append(podNames, pod.GetName()) @@ -72,7 +80,10 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun namespace string, clusterName string, imageName string, expectedInstances int, timeout int, ) { Eventually(func() (int32, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return 0, err + } updatedPods := int32(0) for _, pod := range podList.Items { // We need to check if a pod is ready, otherwise we @@ -94,7 +105,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun }, timeout).Should(BeEquivalentTo(expectedInstances)) } - // Verify that after an update all the pods are ready and running + // Verify that after an update all the podutils are ready and running // an updated image AssertUpdateImage := func(namespace string, clusterName string) { // TODO: the nodes are downloading the image sequentially, @@ -112,25 +123,26 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun var cluster *apiv1.Cluster Eventually(func(g Gomega) error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.ImageName = updatedImageName return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) // All the postgres containers should have the updated image AssertPodsRunOnImage(namespace, clusterName, updatedImageName, cluster.Spec.Instances, timeout) - // Setting up a cluster with three pods is slow, usually 200-600s - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + // Setting up a cluster with three podutils is slow, usually 200-600s + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) } // Verify that the pod name changes amount to an expected number - AssertChangedNames := func(namespace string, clusterName string, + AssertChangedNames := func( + namespace string, clusterName string, originalPodNames []string, expectedUnchangedNames int, ) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) matchingNames := 0 for _, pod := range podList.Items { @@ -146,10 +158,11 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun } // Verify that the pod UIDs changes are the expected number - AssertNewPodsUID := func(namespace string, clusterName 
string, + AssertNewPodsUID := func( + namespace string, clusterName string, originalPodUID []types.UID, expectedUnchangedUIDs int, ) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) matchingUID := 0 for _, pod := range podList.Items { @@ -165,10 +178,11 @@ } // Verify that the PVC UIDs changes are the expected number - AssertChangedPvcUID := func(namespace string, clusterName string, + AssertChangedPvcUID := func( + namespace string, clusterName string, originalPVCUID []types.UID, expectedUnchangedPvcUIDs int, ) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) matchingPVC := 0 for _, pod := range podList.Items { @@ -191,14 +205,15 @@ } // Verify that the -rw endpoint points to the expected primary - AssertPrimary := func(namespace, clusterName string, + AssertPrimary := func( + namespace, clusterName string, oldPrimaryPod *corev1.Pod, expectNewPrimaryIdx bool, ) { var cluster *apiv1.Cluster var err error Eventually(func(g Gomega) { - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) if expectNewPrimaryIdx { g.Expect(cluster.Status.CurrentPrimary).ToNot(BeEquivalentTo(oldPrimaryPod.Name)) @@ -208,42 +223,31 @@ }, RetryTimeout).Should(Succeed()) // Get the new current primary Pod - currentPrimaryPod, err := env.GetPod(namespace, cluster.Status.CurrentPrimary) + currentPrimaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, cluster.Status.CurrentPrimary) Expect(err).ToNot(HaveOccurred()) endpointName := clusterName + "-rw" - endpointNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: endpointName, - } // we give 10 seconds to the apiserver to update the endpoint timeout := 10 Eventually(func() (string, error) { - endpoint := &corev1.Endpoints{} - err := env.Client.Get(env.Ctx, endpointNamespacedName, endpoint) - return testsUtils.FirstEndpointIP(endpoint), err + endpointSlice, err := testsUtils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, endpointName) + return testsUtils.FirstEndpointSliceIP(endpointSlice), err }, timeout).Should(BeEquivalentTo(currentPrimaryPod.Status.PodIP)) } - // Verify that the IPs of the pods match the ones in the -r endpoint and - // that the amount of pods is the expected one + // Verify that the IPs of the pods match the ones in the -r endpoint and + // that the amount of pods is the expected one AssertReadyEndpoint := func(namespace string, clusterName string, expectedEndpoints int) { - endpointName := clusterName + "-r" - endpoint := &corev1.Endpoints{} - endpointNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: endpointName, - } - err := env.Client.Get(env.Ctx, endpointNamespacedName, - endpoint) + readServiceName := clusterName + "-r" + endpointSlice, err := testsUtils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, readServiceName) Expect(err).ToNot(HaveOccurred()) - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err :=
clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(expectedEndpoints, err).To(BeEquivalentTo(len(podList.Items))) matchingIP := 0 for _, pod := range podList.Items { ip := pod.Status.PodIP - for _, addr := range endpoint.Subsets[0].Addresses { - if ip == addr.IP { + for _, endpoint := range endpointSlice.Endpoints { + if ip == endpoint.Addresses[0] { matchingIP++ } } @@ -251,7 +255,8 @@ Expect(matchingIP).To(BeEquivalentTo(expectedEndpoints)) } - AssertRollingUpdate := func(namespace string, clusterName string, + AssertRollingUpdate := func( + namespace string, clusterName string, sampleFile string, expectNewPrimaryIdx bool, ) { var originalPodNames []string @@ -261,12 +266,12 @@ AssertCreateCluster(namespace, clusterName, sampleFile, env) // Gather the number of instances in this Cluster - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) clusterInstances := cluster.Spec.Instances // Gather the original primary Pod - originalPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) + originalPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("Gathering info on the current state", func() { @@ -276,18 +281,18 @@ By("updating the cluster definition", func() { AssertUpdateImage(namespace, clusterName) }) - // Since we're using a pvc, after the update the pods should + // Since we're using a pvc, after the update the pods should // have been created with the same name using the same pvc. // Here we check that the names we've saved at the beginning - // of the It are the same names of the current pods. - By("checking that the names of the pods have not changed", func() { + // of the It are the same names of the current pods. + By("checking that the names of the pods have not changed", func() { AssertChangedNames(namespace, clusterName, originalPodNames, clusterInstances) }) // Even if they have the same names, they should have different - // UIDs, as the pods are new. Here we check that the UID + // UIDs, as the pods are new. Here we check that the UID // we've saved at the beginning of the It don't match the // current ones.
- By("checking that the pods are new ones", func() { + By("checking that the podutils are new ones", func() { AssertNewPodsUID(namespace, clusterName, originalPodUID, 0) }) // The PVC get reused, so they should have the same UID @@ -302,13 +307,13 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun By("having the current primary on the new TargetPrimary", func() { AssertPrimary(namespace, clusterName, originalPrimaryPod, expectNewPrimaryIdx) }) - // Check that the new pods are included in the endpoint + // Check that the new podutils are included in the endpoint By("having each pod included in the -r service", func() { AssertReadyEndpoint(namespace, clusterName, clusterInstances) }) } - newImageCatalog := func(namespace string, name string, major int, image string) *apiv1.ImageCatalog { + newImageCatalog := func(namespace string, name string, major uint64, image string) *apiv1.ImageCatalog { imgCat := &apiv1.ImageCatalog{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -318,7 +323,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Images: []apiv1.CatalogImage{ { Image: image, - Major: major, + Major: int(major), }, }, }, @@ -328,7 +333,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun } newImageCatalogCluster := func( - namespace string, name string, major int, instances int, storageClass string, + namespace string, name string, major uint64, instances int, storageClass string, ) *apiv1.Cluster { cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -339,11 +344,11 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Instances: instances, ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - APIGroup: &apiv1.GroupVersion.Group, + APIGroup: &apiv1.SchemeGroupVersion.Group, Name: name, Kind: "ImageCatalog", }, - Major: major, + Major: int(major), }, PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ @@ -376,7 +381,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun return cluster } - newClusterImageCatalog := func(name string, major int, image string) *apiv1.ClusterImageCatalog { + newClusterImageCatalog := func(name string, major uint64, image string) *apiv1.ClusterImageCatalog { imgCat := &apiv1.ClusterImageCatalog{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -385,7 +390,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Images: []apiv1.CatalogImage{ { Image: image, - Major: major, + Major: int(major), }, }, }, @@ -408,15 +413,15 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Expect(err).ToNot(HaveOccurred()) err = env.Client.Create(env.Ctx, cluster) Expect(err).ToNot(HaveOccurred()) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) // Gather the number of instances in this Cluster - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) clusterInstances := cluster.Spec.Instances // Gather the original primary Pod - originalPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) + originalPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) 
Expect(err).ToNot(HaveOccurred()) By("Gathering info on the current state", func() { @@ -430,20 +435,20 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Expect(err).ToNot(HaveOccurred()) }) AssertPodsRunOnImage(namespace, clusterName, updatedImageName, cluster.Spec.Instances, 900) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) - // Since we're using a pvc, after the update the pods should + // Since we're using a pvc, after the update the podutils should // have been created with the same name using the same pvc. // Here we check that the names we've saved at the beginning - // of the It are the same names of the current pods. - By("checking that the names of the pods have not changed", func() { + // of the It are the same names of the current podutils. + By("checking that the names of the podutils have not changed", func() { AssertChangedNames(namespace, clusterName, originalPodNames, clusterInstances) }) // Even if they have the same names, they should have different - // UIDs, as the pods are new. Here we check that the UID + // UIDs, as the podutils are new. Here we check that the UID // we've saved at the beginning of the It don't match the // current ones. - By("checking that the pods are new ones", func() { + By("checking that the podutils are new ones", func() { AssertNewPodsUID(namespace, clusterName, originalPodUID, 0) }) // The PVC get reused, so they should have the same UID @@ -458,7 +463,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun By("having the current primary on the new TargetPrimary", func() { AssertPrimary(namespace, clusterName, originalPrimaryPod, expectNewPrimaryIdx) }) - // Check that the new pods are included in the endpoint + // Check that the new podutils are included in the endpoint By("having each pod included in the -r service", func() { AssertReadyEndpoint(namespace, clusterName, clusterInstances) }) @@ -476,9 +481,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. // Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertRollingUpdate(namespace, clusterName, sampleFile, true) }) @@ -495,9 +500,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. 
// Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertRollingUpdate(namespace, clusterName, sampleFile, false) }) @@ -509,9 +514,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun sampleFile = fixturesDir + "/rolling_updates/cluster-using-primary-update-method.yaml.template" ) It("can do rolling update", func() { - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertRollingUpdate(namespace, clusterName, sampleFile, false) }) @@ -522,7 +527,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun var storageClass string var preRollingImg string var updatedImageName string - var major int + var pgVersion version.Data BeforeEach(func() { storageClass = os.Getenv("E2E_DEFAULT_STORAGE_CLASS") preRollingImg = os.Getenv("E2E_PRE_ROLLING_UPDATE_IMG") @@ -533,11 +538,11 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // We automate the extraction of the major version from the image, because we don't want to keep maintaining // the major version in the test - version, err := postgres.GetPostgresVersionFromTag(utils.GetImageTag(preRollingImg)) + var err error + pgVersion, err = version.FromTag(reference.New(preRollingImg).Tag) if err != nil { Expect(err).ToNot(HaveOccurred()) } - major = postgres.GetPostgresMajorVersion(version) / 10000 }) Context("ImageCatalog", func() { @@ -554,12 +559,12 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. // Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create a new image catalog and a new cluster - catalog := newImageCatalog(namespace, clusterName, major, preRollingImg) - cluster := newImageCatalogCluster(namespace, clusterName, major, 3, storageClass) + catalog := newImageCatalog(namespace, clusterName, pgVersion.Major(), preRollingImg) + cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 3, storageClass) AssertRollingUpdateWithImageCatalog(cluster, catalog, updatedImageName, true) }) @@ -574,11 +579,11 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. 
// Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - catalog := newImageCatalog(namespace, clusterName, major, preRollingImg) - cluster := newImageCatalogCluster(namespace, clusterName, major, 1, storageClass) + catalog := newImageCatalog(namespace, clusterName, pgVersion.Major(), preRollingImg) + cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 1, storageClass) AssertRollingUpdateWithImageCatalog(cluster, catalog, updatedImageName, false) }) }) @@ -589,7 +594,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun ) var catalog *apiv1.ClusterImageCatalog BeforeEach(func() { - catalog = newClusterImageCatalog(clusterName, major, preRollingImg) + catalog = newClusterImageCatalog(clusterName, pgVersion.Major(), preRollingImg) }) AfterEach(func() { err := env.Client.Delete(env.Ctx, catalog) @@ -598,7 +603,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // Wait until we really deleted it Eventually(func() error { return env.Client.Get(env.Ctx, ctrl.ObjectKey{Name: catalog.Name}, catalog) - }, 30).Should(MatchError(apierrs.IsNotFound, metav1.StatusReasonNotFound)) + }, 30).Should(MatchError(apierrs.IsNotFound, string(metav1.StatusReasonNotFound))) }) Context("Three Instances", func() { const ( @@ -610,10 +615,10 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. // Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - cluster := newImageCatalogCluster(namespace, clusterName, major, 3, storageClass) + cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 3, storageClass) cluster.Spec.ImageCatalogRef.Kind = "ClusterImageCatalog" AssertRollingUpdateWithImageCatalog(cluster, catalog, updatedImageName, true) }) @@ -628,10 +633,10 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. // Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - cluster := newImageCatalogCluster(namespace, clusterName, major, 1, storageClass) + cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 1, storageClass) cluster.Spec.ImageCatalogRef.Kind = "ClusterImageCatalog" AssertRollingUpdateWithImageCatalog(cluster, catalog, updatedImageName, false) }) diff --git a/tests/e2e/scaling_test.go b/tests/e2e/scaling_test.go index 000030e850..1dcab4cf7d 100644 --- a/tests/e2e/scaling_test.go +++ b/tests/e2e/scaling_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -20,7 +23,7 @@ import ( "fmt" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -47,7 +50,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati const namespacePrefix = "cluster-scale-e2e-with-slots" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithReplicationSlots, env) @@ -55,7 +58,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati // Add a node to the cluster and verify the cluster has one more // element By("adding an instance to the cluster", func() { - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName)) + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName)) Expect(err).ToNot(HaveOccurred()) timeout := 300 AssertClusterIsReady(namespace, clusterName, timeout, env) @@ -66,7 +69,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati // Remove a node from the cluster and verify the cluster has one // element less By("removing an instance from the cluster", func() { - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) Expect(err).ToNot(HaveOccurred()) timeout := 60 AssertClusterIsReady(namespace, clusterName, timeout, env) @@ -84,14 +87,14 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "cluster-scale-e2e" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithoutReplicationSlots, env) // Add a node to the cluster and verify the cluster has one more // element By("adding an instance to the cluster", func() { - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName)) + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName)) Expect(err).ToNot(HaveOccurred()) timeout := 300 AssertClusterIsReady(namespace, clusterName, timeout, env) @@ -101,7 +104,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati // Remove a node from the cluster and verify the cluster has one // element less By("removing an instance from the cluster", func() { - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, 
clusterName)) Expect(err).ToNot(HaveOccurred()) timeout := 60 AssertClusterIsReady(namespace, clusterName, timeout, env) diff --git a/tests/e2e/self_fencing_test.go b/tests/e2e/self_fencing_test.go new file mode 100644 index 0000000000..0e767ed669 --- /dev/null +++ b/tests/e2e/self_fencing_test.go @@ -0,0 +1,177 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Self-fencing with liveness probe", Serial, Label(tests.LabelDisruptive), func() { + const ( + level = tests.Lowest + namespacePrefix = "self-fencing" + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsLocal() { + Skip("This test is only run on local cluster") + } + }) + + verifyIsolatedPrimary := func(namespace, isolatedPod, isolatedNode string, livenessPingerEnabled bool) { + By("verifying the isolatedPod behaviour", func() { + defaultCommand := fmt.Sprintf( + "docker exec %v crictl ps -a -q "+ + "--label io.kubernetes.pod.namespace=%s,io.kubernetes.pod.name=%s "+ + "--name postgres", isolatedNode, namespace, isolatedPod) + + if livenessPingerEnabled { + Eventually(func(g Gomega) { + out, _, err := run.Unchecked(fmt.Sprintf("%s -s Exited", defaultCommand)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(out).ToNot(BeEmpty()) + if out != "" { + GinkgoWriter.Printf("Container %s (%s) has been terminated\n", + isolatedPod, strings.TrimSpace(out)) + } + }, 120).Should(Succeed()) + } else { + Consistently(func(g Gomega) { + out, _, err := run.Unchecked(fmt.Sprintf("%s -s Running", defaultCommand)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(out).ToNot(BeEmpty()) + if out != "" { + GinkgoWriter.Printf("Container %s (%s) is still running\n", + isolatedPod, strings.TrimSpace(out)) + } + }, 20, 5).Should(Succeed()) + } + }) + } + + assertLivenessPinger := func(clusterManifest string, livenessPingerEnabled bool) { + var namespace, clusterName, isolatedNode string + var err error + var oldPrimaryPod *corev1.Pod + + DeferCleanup(func() { + // Ensure the isolatedNode networking is re-established + if 
CurrentSpecReport().Failed() { + _, _, _ = run.Unchecked(fmt.Sprintf("docker network connect kind %v", isolatedNode)) + } + }) + + By("creating a Cluster", func() { + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) + Expect(err).ToNot(HaveOccurred()) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, clusterManifest, env) + }) + + By("setting up the environment", func() { + // Ensure the operator is not running on the same node as the primary. + // If it is, we switch to a new primary + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + operatorPod, err := operator.GetPod(env.Ctx, env.Client) + Expect(err).NotTo(HaveOccurred()) + if primaryPod.Spec.NodeName == operatorPod.Spec.NodeName { + AssertSwitchover(namespace, clusterName, env) + } + }) + + By("disconnecting the node containing the primary", func() { + oldPrimaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + isolatedNode = oldPrimaryPod.Spec.NodeName + _, _, err = run.Unchecked(fmt.Sprintf("docker network disconnect kind %v", isolatedNode)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("verifying that a new primary has been promoted", func() { + AssertClusterEventuallyReachesPhase(namespace, clusterName, + []string{apiv1.PhaseFailOver}, 120) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.CurrentPrimary).ToNot(BeEquivalentTo(oldPrimaryPod.Name)) + }, testTimeouts[timeouts.NewPrimaryAfterFailover]).Should(Succeed()) + }) + + verifyIsolatedPrimary(namespace, oldPrimaryPod.Name, isolatedNode, livenessPingerEnabled) + + By("reconnecting the isolated Node", func() { + _, _, err = run.Unchecked(fmt.Sprintf("docker network connect kind %v", isolatedNode)) + Expect(err).ToNot(HaveOccurred()) + + // Assert that the oldPrimary comes back as a replica + namespacedName := types.NamespacedName{ + Namespace: oldPrimaryPod.Namespace, + Name: oldPrimaryPod.Name, + } + timeout := 180 + Eventually(func(g Gomega) { + pod := corev1.Pod{} + err := env.Client.Get(env.Ctx, namespacedName, &pod) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(utils.IsPodActive(pod)).To(BeTrue()) + g.Expect(utils.IsPodReady(pod)).To(BeTrue()) + g.Expect(specs.IsPodStandby(pod)).To(BeTrue()) + g.Expect(nodes.IsNodeReachable(env.Ctx, env.Client, isolatedNode)).To(BeTrue()) + }, timeout).Should(Succeed()) + }) + } + + When("livenessPinger is enabled", func() { + const sampleFile = fixturesDir + "/self-fencing/cluster-liveness-pinger-enabled.yaml.template" + It("will terminate an isolated primary", func() { + assertLivenessPinger(sampleFile, true) + }) + }) + + When("livenessPinger is disabled", func() { + const sampleFile = fixturesDir + "/self-fencing/cluster-liveness-pinger-disabled.yaml.template" + It("will not restart an isolated primary", func() { + assertLivenessPinger(sampleFile, false) + }) + }) +}) diff --git a/tests/e2e/storage_expansion_test.go b/tests/e2e/storage_expansion_test.go index 7b27204806..8eb3d51a71 100644 --- a/tests/e2e/storage_expansion_test.go +++ b/tests/e2e/storage_expansion_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF 
Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -21,7 +24,8 @@ import ( "os" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -53,7 +57,10 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { // Initializing namespace variable to be used in test case namespacePrefix = "storage-expansion-true" // Extracting bool value of AllowVolumeExpansion - allowExpansion, err := utils.GetStorageAllowExpansion(defaultStorageClass, env) + allowExpansion, err := storage.GetStorageAllowExpansion( + env.Ctx, env.Client, + defaultStorageClass, + ) Expect(err).ToNot(HaveOccurred()) if (allowExpansion == nil) || (*allowExpansion == false) { Skip(fmt.Sprintf("AllowedVolumeExpansion is false on %v", defaultStorageClass)) @@ -63,7 +70,7 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { It("expands PVCs via online resize", func() { var err error // Creating namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating a cluster with three nodes AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -76,7 +83,10 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { // Initializing namespace variable to be used in test case namespacePrefix = "storage-expansion-false" // Extracting bool value of AllowVolumeExpansion - allowExpansion, err := utils.GetStorageAllowExpansion(defaultStorageClass, env) + allowExpansion, err := storage.GetStorageAllowExpansion( + env.Ctx, env.Client, + defaultStorageClass, + ) Expect(err).ToNot(HaveOccurred()) if (allowExpansion != nil) && (*allowExpansion == true) { Skip(fmt.Sprintf("AllowedVolumeExpansion is true on %v", defaultStorageClass)) @@ -85,20 +95,20 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { It("expands PVCs via offline resize", func() { var err error // Creating namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) By("update cluster for resizeInUseVolumes as false", func() { // Updating cluster with 'resizeInUseVolumes' sets to 'false' in storage. 
// Check if operator does not return error Eventually(func() error { - _, _, err = utils.RunUnchecked("kubectl patch cluster " + clusterName + " -n " + namespace + + _, _, err = run.Unchecked("kubectl patch cluster " + clusterName + " -n " + namespace + " -p '{\"spec\":{\"storage\":{\"resizeInUseVolumes\":false}}}' --type=merge") if err != nil { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) }) OfflineResizePVC(namespace, clusterName, 600) }) diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go index b00cad1623..1df88660e5 100644 --- a/tests/e2e/suite_test.go +++ b/tests/e2e/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -22,9 +25,9 @@ import ( "testing" "time" - "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/onsi/ginkgo/v2/types" "github.com/thoas/go-funk" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -34,31 +37,35 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" cnpgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/cloudvendors" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/sternmultitailer" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) const ( - fixturesDir = "./fixtures" - RetryTimeout = utils.RetryTimeout - PollingTime = utils.PollingTime - psqlClientNamespace = "psql-client-namespace" + fixturesDir = "./fixtures" + RetryTimeout = environment.RetryTimeout + PollingTime = objects.PollingTime ) var ( - env *utils.TestingEnvironment + env *environment.TestingEnvironment testLevelEnv *tests.TestEnvLevel - testCloudVendorEnv *utils.TestEnvVendor - psqlClientPod *corev1.Pod + testCloudVendorEnv *cloudvendors.TestEnvVendor expectedOperatorPodName string operatorPodWasRenamed bool operatorWasRestarted bool quickDeletionPeriod = int64(1) - testTimeouts map[utils.Timeout]int - minioEnv = &utils.MinioEnv{ + testTimeouts map[timeouts.Timeout]int + minioEnv = &minio.Env{ Namespace: "minio", ServiceName: "minio-service.minio", CaSecretName: "minio-server-ca-secret", @@ -68,48 +75,43 @@ var ( var _ = SynchronizedBeforeSuite(func() []byte { var err error - env, err = utils.NewTestingEnvironment() + env, err = environment.NewTestingEnvironment() Expect(err).ShouldNot(HaveOccurred()) // Start stern to write the logs of every pod we are interested in. 
Since we don't have a way to have a selector // matching both the operator's and the clusters' pods, we need to start stern twice. sternClustersCtx, sternClusterCancel := context.WithCancel(env.Ctx) sternClusterDoneChan := sternmultitailer.StreamLogs(sternClustersCtx, env.Interface, clusterPodsLabelSelector(), - env.SternLogDir) + namespaces.SternLogDirectory) DeferCleanup(func() { sternClusterCancel() <-sternClusterDoneChan }) sternOperatorCtx, sternOperatorCancel := context.WithCancel(env.Ctx) sternOperatorDoneChan := sternmultitailer.StreamLogs(sternOperatorCtx, env.Interface, operatorPodsLabelSelector(), - env.SternLogDir) + namespaces.SternLogDirectory) DeferCleanup(func() { sternOperatorCancel() <-sternOperatorDoneChan }) - psqlPod, err := utils.GetPsqlClient(psqlClientNamespace, env) - Expect(err).ShouldNot(HaveOccurred()) - DeferCleanup(func() { - err := env.DeleteNamespaceAndWait(psqlClientNamespace, 300) - Expect(err).ToNot(HaveOccurred()) - }) + _ = corev1.AddToScheme(env.Scheme) + _ = appsv1.AddToScheme(env.Scheme) // Set up a global MinIO service on his own namespace - err = env.CreateNamespace(minioEnv.Namespace) + err = namespaces.CreateNamespace(env.Ctx, env.Client, minioEnv.Namespace) Expect(err).ToNot(HaveOccurred()) DeferCleanup(func() { - err := env.DeleteNamespaceAndWait(minioEnv.Namespace, 300) + err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, minioEnv.Namespace, 300) Expect(err).ToNot(HaveOccurred()) }) - minioEnv.Timeout = uint(testTimeouts[utils.MinioInstallation]) - minioClient, err := utils.MinioDeploy(minioEnv, env) + minioEnv.Timeout = uint(testTimeouts[timeouts.MinioInstallation]) + minioClient, err := minio.Deploy(minioEnv, env) Expect(err).ToNot(HaveOccurred()) caSecret := minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName) minioEnv.CaSecretObj = *caSecret objs := map[string]corev1.Pod{ - "psql": *psqlPod, "minio": *minioClient, } @@ -123,7 +125,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { var err error // We are creating new testing env object again because above testing env can not serialize and // accessible to all nodes (specs) - if env, err = utils.NewTestingEnvironment(); err != nil { + if env, err = environment.NewTestingEnvironment(); err != nil { panic(err) } @@ -134,11 +136,11 @@ var _ = SynchronizedBeforeSuite(func() []byte { panic(err) } - if testTimeouts, err = utils.Timeouts(); err != nil { + if testTimeouts, err = timeouts.Timeouts(); err != nil { panic(err) } - if testCloudVendorEnv, err = utils.TestCloudVendor(); err != nil { + if testCloudVendorEnv, err = cloudvendors.TestCloudVendor(); err != nil { panic(err) } @@ -147,19 +149,9 @@ var _ = SynchronizedBeforeSuite(func() []byte { panic(err) } - psqlClientPod = objs["psql"] minioEnv.Client = objs["minio"] }) -var _ = ReportAfterSuite("Gathering failed reports", func(report Report) { - // Keep the logs of the operator and the clusters in case of failure - // If everything is skipped, env has not been initialized, and we'll have nothing to clean up - if report.SuiteSucceeded && env != nil { - err := fileutils.RemoveDirectory(env.SternLogDir) - Expect(err).ToNot(HaveOccurred()) - } -}) - var _ = BeforeEach(func() { labelsForTestsBreakingTheOperator := []string{"upgrade", "disruptive"} breakingLabelsInCurrentTest := funk.Join(CurrentSpecReport().Labels(), @@ -169,7 +161,7 @@ var _ = BeforeEach(func() { return } - operatorPod, err := env.GetOperatorPod() + operatorPod, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) if 
operatorPodWasRenamed { @@ -202,14 +194,14 @@ var _ = AfterEach(func() { if len(breakingLabelsInCurrentTest.([]string)) != 0 { return } - operatorPod, err := env.GetOperatorPod() + operatorPod, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) - wasRenamed := utils.OperatorPodRenamed(operatorPod, expectedOperatorPodName) + wasRenamed := operator.PodRenamed(operatorPod, expectedOperatorPodName) if wasRenamed { operatorPodWasRenamed = true Fail("operator was renamed") } - wasRestarted := utils.OperatorPodRestarted(operatorPod) + wasRestarted := operator.PodRestarted(operatorPod) if wasRestarted { operatorWasRestarted = true Fail("operator was restarted") diff --git a/tests/e2e/switchover_test.go b/tests/e2e/switchover_test.go index 4801a8f5ff..5b6955173a 100644 --- a/tests/e2e/switchover_test.go +++ b/tests/e2e/switchover_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -41,9 +45,9 @@ var _ = Describe("Switchover", Serial, Label(tests.LabelSelfHealing), func() { // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "switchover-e2e-with-slots" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFileWithReplicationSlots) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithReplicationSlots) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithReplicationSlots, env) @@ -57,9 +61,9 @@ var _ = Describe("Switchover", Serial, Label(tests.LabelSelfHealing), func() { // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "switchover-e2e" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFileWithoutReplicationSlots) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithoutReplicationSlots) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithoutReplicationSlots, env) diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go index e0384579a9..912240c543 100644 --- a/tests/e2e/syncreplicas_test.go +++ b/tests/e2e/syncreplicas_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -20,12 +23,22 @@ import ( "fmt" "strconv" "strings" + "time" + corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -40,42 +53,109 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) getSyncReplicationCount := func(namespace, clusterName, syncState string, expectedCount int) { - Eventually(func() (int, error, error) { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) int { + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) - out, stdErr, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + out, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.GetName(), }, "postgres", - fmt.Sprintf("SELECT count(*) from pg_stat_replication WHERE sync_state = '%s'", syncState)) - Expect(stdErr).To(BeEmpty()) - Expect(err).ShouldNot(HaveOccurred()) + fmt.Sprintf("SELECT count(*) from pg_catalog.pg_stat_replication WHERE sync_state = '%s'", syncState)) + g.Expect(stdErr).To(BeEmpty()) + g.Expect(err).ToNot(HaveOccurred()) value, atoiErr := strconv.Atoi(strings.Trim(out, "\n")) - return value, err, atoiErr + g.Expect(atoiErr).ToNot(HaveOccurred()) + return value }, RetryTimeout).Should(BeEquivalentTo(expectedCount)) } compareSynchronousStandbyNames := func(namespace, clusterName, element string) { - Eventually(func() string { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) { + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) - out, stdErr, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + out, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.GetName(), }, "postgres", - "select setting from pg_settings where name = 'synchronous_standby_names'") - Expect(stdErr).To(BeEmpty()) - Expect(err).ShouldNot(HaveOccurred()) + "select setting from pg_catalog.pg_settings where name = 'synchronous_standby_names'") + g.Expect(stdErr).To(BeEmpty()) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(strings.Trim(out, "\n")).To(ContainSubstring(element)) + }, 30).Should(Succeed()) + } + + 
assertProbeRespectsReplicaLag := func(namespace, replicaName, probeType string) { + By(fmt.Sprintf( + "checking that %s probe of replica %s is waiting for lag to decrease before marking the pod ready", + probeType, replicaName), func() { + timeout := 2 * time.Minute + + // This "Eventually" block is needed because we may grab only a portion + // of the replica logs, and the "ParseJSONLogs" function may fail on the latest + // log record when this happens + Eventually(func(g Gomega) { + data, err := logs.ParseJSONLogs(env.Ctx, env.Interface, namespace, replicaName) + g.Expect(err).ToNot(HaveOccurred()) + + recordWasFound := false + for _, record := range data { + err, ok := record["err"].(string) + if !ok { + continue + } + msg, ok := record["msg"].(string) + if !ok { + continue + } + + if msg == fmt.Sprintf("%s probe failing", probeType) && + strings.Contains(err, "streaming replica lagging") { + recordWasFound = true + break + } + } + + g.Expect(recordWasFound).To( + BeTrue(), + fmt.Sprintf("The %s probe is preventing the replica from being marked ready", probeType), + ) + }, timeout).Should(Succeed()) + }) + } + + generateDataLoad := func(namespace, clusterName string) { + By("adding data to the primary", func() { + commandTimeout := time.Second * 600 + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) - return strings.Trim(out, "\n") - }, 30).Should(ContainSubstring(element)) + // This will generate 1Gi of data in the primary node and, since the replica we fenced + // is not aligned, will generate lag. + _, _, err = exec.Command( + env.Ctx, env.Interface, env.RestClientConfig, + *primary, specs.PostgresContainerName, &commandTimeout, + "psql", + "-U", + "postgres", + "-c", + "create table numbers (i integer); "+ + "insert into numbers (select generate_series(1,1000000)); "+ + "insert into numbers (select * from numbers); "+ + "insert into numbers (select * from numbers); "+ + "insert into numbers (select * from numbers); ", + ) + Expect(err).ToNot(HaveOccurred()) + }) } Context("Legacy synchronous replication", func() { @@ -86,11 +166,11 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { namespacePrefix = "legacy-sync-replicas-e2e" sampleFile = fixturesDir + "/sync_replicas/cluster-sync-replica-legacy.yaml.template" ) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -101,21 +181,22 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) By("checking that synchronous_standby_names reflects cluster's changes", func() { // Set MaxSyncReplicas to 1 - Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.MaxSyncReplicas = 1 - return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, 5).Should(BeNil()) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, RetryTimeout, 5).Should(Succeed()) // Scale the cluster down to 2 
pods - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace, clusterName)) + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace, + clusterName)) Expect(err).ToNot(HaveOccurred()) timeout := 120 // Wait for pod 3 to be completely terminated Eventually(func() (int, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) return len(podList.Items), err }, timeout).Should(BeEquivalentTo(2)) @@ -124,14 +205,14 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") }) By("failing when SyncReplicas fields are invalid", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Expect an error. MaxSyncReplicas must be lower than the number of instances cluster.Spec.MaxSyncReplicas = 2 err = env.Client.Update(env.Ctx, cluster) Expect(err).To(HaveOccurred()) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Expect an error. MinSyncReplicas must be lower than MaxSyncReplicas cluster.Spec.MinSyncReplicas = 2 @@ -145,7 +226,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { namespacePrefix = "sync-replicas-statstatements" sampleFile = fixturesDir + "/sync_replicas/cluster-pgstatstatements.yaml.template" ) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Are extensions a problem with synchronous replication? No, absolutely not, @@ -156,7 +237,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { // bootstrapping the cluster, the CREATE EXTENSION instruction will block // the primary since the desired number of synchronous replicas (even when 1) // is not met. 
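The "ANY 1" and "FIRST 1" substrings asserted throughout these specs follow PostgreSQL's grammar for synchronous_standby_names: ANY n (list) requests quorum-based commit (any n of the listed standbys must confirm), FIRST n (list) requests priority-based commit (the n highest-priority listed standbys must confirm), and an empty setting disables synchronous replication entirely. A small sketch of those semantics; classifySyncStandbyNames is hypothetical and not part of the test suite:

	package main

	import (
		"fmt"
		"strings"
	)

	// classifySyncStandbyNames reports which replication mode a
	// synchronous_standby_names value expresses (PostgreSQL semantics).
	func classifySyncStandbyNames(setting string) string {
		switch {
		case setting == "":
			return "asynchronous (no synchronous standbys)"
		case strings.HasPrefix(setting, "ANY "):
			return "quorum-based"
		case strings.HasPrefix(setting, "FIRST "):
			return "priority-based"
		default:
			// A bare list such as "s1,s2" behaves like FIRST 1 (s1,s2).
			return "priority-based (legacy list syntax)"
		}
	}

	func main() {
		for _, s := range []string{
			`ANY 1 ("cluster-example-2","cluster-example-3")`,
			`FIRST 1 ("cluster-example-2","cluster-example-3")`,
			"",
		} {
			fmt.Printf("%-48s => %s\n", s, classifySyncStandbyNames(s))
		}
	}
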
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -170,18 +251,17 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) Context("Synchronous replication", func() { - var namespace string - It("can manage quorum/priority based synchronous replication", func() { + var namespace string const ( namespacePrefix = "sync-replicas-e2e" sampleFile = fixturesDir + "/sync_replicas/cluster-sync-replica.yaml.template" ) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -191,42 +271,228 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) By("setting MaxStandbyNamesFromCluster to 1 and decreasing to 1 the sync replicas required", func() { - Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = ptr.To(1) cluster.Spec.PostgresConfiguration.Synchronous.Number = 1 - return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, 5).Should(BeNil()) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, RetryTimeout, 5).Should(Succeed()) getSyncReplicationCount(namespace, clusterName, "quorum", 1) compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") }) By("switching to MethodFirst (priority-based)", func() { - Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.Method = apiv1.SynchronousReplicaConfigurationMethodFirst - return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, 5).Should(BeNil()) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, RetryTimeout, 5).Should(Succeed()) getSyncReplicationCount(namespace, clusterName, "sync", 1) compareSynchronousStandbyNames(namespace, clusterName, "FIRST 1") }) By("by properly setting standbyNamesPre and standbyNamesPost", func() { - Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = nil cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPre = []string{"preSyncReplica"} cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPost = []string{"postSyncReplica"} - return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, 5).Should(BeNil()) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, RetryTimeout, 5).Should(Succeed()) compareSynchronousStandbyNames(namespace, 
clusterName, "FIRST 1 (\"preSyncReplica\"") compareSynchronousStandbyNames(namespace, clusterName, "\"postSyncReplica\")") }) }) + + Context("data durability is preferred", func() { + It("will decrease the number of sync replicas to the number of available replicas", func() { + var namespace string + const ( + namespacePrefix = "sync-replicas-preferred" + sampleFile = fixturesDir + "/sync_replicas/preferred.yaml.template" + ) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) + Expect(err).ToNot(HaveOccurred()) + + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, sampleFile, env) + + By("verifying we have 2 quorum-based replicas", func() { + getSyncReplicationCount(namespace, clusterName, "quorum", 2) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 2") + }) + + By("fencing a replica and verifying we have only 1 quorum-based replica", func() { + Expect(fencing.On(env.Ctx, env.Client, fmt.Sprintf("%v-3", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + getSyncReplicationCount(namespace, clusterName, "quorum", 1) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") + }) + By("fencing the second replica and verifying we unset synchronous_standby_names", func() { + Expect(fencing.On(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + Eventually(func(g Gomega) { + commandTimeout := time.Second * 10 + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + + stdout, _, err := exec.Command( + env.Ctx, env.Interface, env.RestClientConfig, + *primary, specs.PostgresContainerName, &commandTimeout, + "psql", "-U", "postgres", "-tAc", "show synchronous_standby_names", + ) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.Trim(stdout, "\n")).To(BeEmpty()) + }, 160).Should(Succeed()) + }) + By("unfencing the replicas and verifying we have 2 quorum-based replicas", func() { + Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-3", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + getSyncReplicationCount(namespace, clusterName, "quorum", 2) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 2") + }) + }) + }) + + Context("Lag-control in startup & readiness probes", func() { + var ( + namespace string + namespacePrefix string + sampleFile string + clusterName string + fencedReplicaName string + err error + ) + + setupClusterWithLaggingReplica := func() { + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) + Expect(err).ToNot(HaveOccurred()) + + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, sampleFile, env) + + // Set our target fencedReplica + fencedReplicaName = fmt.Sprintf("%s-2", clusterName) + + By("verifying we have 2 quorum-based replicas", func() { + getSyncReplicationCount(namespace, clusterName, "quorum", 2) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 2") + }) + + By("fencing a replica and verifying we have only 1 quorum-based replica", func() { + Expect(fencing.On(env.Ctx, env.Client, 
fencedReplicaName, + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + getSyncReplicationCount(namespace, clusterName, "quorum", 1) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") + }) + + By("waiting for the fenced pod to be not ready", func() { + Eventually(func(g Gomega) bool { + var pod corev1.Pod + err := env.Client.Get(env.Ctx, client.ObjectKey{ + Namespace: namespace, + Name: fencedReplicaName, + }, &pod) + g.Expect(err).ToNot(HaveOccurred()) + + return utils.IsPodReady(pod) + }, 160).Should(BeFalse()) + }) + + generateDataLoad(namespace, clusterName) + } + + It("lag control in startup probe will delay the readiness of replicas", func() { + namespacePrefix = "startup-probe-lag" + sampleFile = fixturesDir + "/sync_replicas/startup-probe-lag-control.yaml.template" + + setupClusterWithLaggingReplica() + + By("stopping the reconciliation loop on the cluster", func() { + // This is needed to prevent the operator from recreating the Pod when we + // delete it. + // We want the Pod to start without being fenced to engage the lag-checking + // startup probe + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + origCluster := cluster.DeepCopy() + if cluster.Annotations == nil { + cluster.Annotations = make(map[string]string) + } + cluster.Annotations[utils.ReconciliationLoopAnnotationName] = "disabled" + + err = env.Client.Patch(env.Ctx, cluster, client.MergeFrom(origCluster)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("deleting the test replica and disabling fencing", func() { + var pod corev1.Pod + err := env.Client.Get(env.Ctx, client.ObjectKey{ + Namespace: namespace, + Name: fencedReplicaName, + }, &pod) + Expect(err).ToNot(HaveOccurred()) + + err = env.Client.Delete(env.Ctx, &pod) + Expect(err).ToNot(HaveOccurred()) + + Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + }) + + By("enabling the reconciliation loops on the cluster", func() { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + origCluster := cluster.DeepCopy() + if cluster.Annotations == nil { + cluster.Annotations = make(map[string]string) + } + delete(cluster.Annotations, utils.ReconciliationLoopAnnotationName) + + err = env.Client.Patch(env.Ctx, cluster, client.MergeFrom(origCluster)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("waiting for the replica to be back again and ready", func() { + Eventually(func(g Gomega) bool { + var pod corev1.Pod + err := env.Client.Get(env.Ctx, client.ObjectKey{ + Namespace: namespace, + Name: fencedReplicaName, + }, &pod) + g.Expect(err).ToNot(HaveOccurred()) + + return utils.IsPodReady(pod) + }, 160).Should(BeTrue()) + }) + + assertProbeRespectsReplicaLag(namespace, fencedReplicaName, "startup") + }) + + It("lag control in readiness probe will delay the readiness of replicas", func() { + namespacePrefix = "readiness-probe-lag" + sampleFile = fixturesDir + "/sync_replicas/readiness-probe-lag-control.yaml.template" + + setupClusterWithLaggingReplica() + + By("disabling fencing", func() { + Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + }) + + assertProbeRespectsReplicaLag(namespace, fencedReplicaName, "readiness") + }) + }) }) }) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index
899e49524a..00b1cbd672 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +13,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e import ( - "bytes" "context" "fmt" "os" @@ -28,16 +30,27 @@ import ( "strings" "time" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -69,27 +82,42 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, clusterSetup := func(namespace, clusterManifest string) { var err error - clusterName, err = env.GetResourceNameFromYAML(clusterManifest) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) By("creating a cluster and having it be ready", func() { AssertCreateCluster(namespace, clusterName, clusterManifest, env) }) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) + } - clusterLogs := logs.ClusterStreamingRequest{ - Cluster: cluster, - Options: &corev1.PodLogOptions{ - Follow: true, - }, + // Verify that the tablespace exists on the primary pod of a cluster + hasTablespaceAndOwner := func(cluster *apiv1.Cluster, tablespace, owner string) (bool, error) { + namespace := cluster.Namespace + clusterName := cluster.Name + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return false, err } - var buffer bytes.Buffer - go func() { - defer GinkgoRecover() - err = clusterLogs.SingleStream(context.TODO(), &buffer) - Expect(err).ToNot(HaveOccurred()) - }() + result, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: namespace, + PodName: primaryPod.Name, + }, postgres.AppDBName, + fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_tablespace WHERE spcname = '%s' "+ + "AND pg_catalog.pg_get_userbyid(spcowner) = '%s'", + tablespace, + owner), + ) + if err != nil { + return false, err + } + if stdErr != "" { + return false, fmt.Errorf("error while checking tablespaces: %s", stdErr) + } + return result == "1\n", nil } Context("on a new cluster with tablespaces", Ordered, func() { @@ -104,11 +132,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, ) BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // We create the MinIO credentials required to login into the system - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) + }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, namespace) @@ -119,28 +157,29 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) It("can verify tablespaces and PVC were created", func() { - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.Short]) - AssertRoleReconciled(namespace, clusterName, "dante", testTimeouts[testUtils.Short]) - AssertRoleReconciled(namespace, clusterName, "alpha", testTimeouts[testUtils.Short]) - AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") - AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "dante") + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, 
testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.Short]) + AssertRoleReconciled(namespace, clusterName, "dante", testTimeouts[timeouts.Short]) + AssertRoleReconciled(namespace, clusterName, "alpha", testTimeouts[timeouts.Short]) + Expect(hasTablespaceAndOwner(cluster, "atablespace", "app")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "anothertablespace", "dante")).To(BeTrue()) }) - It("can update the cluster by change the owner of tablesapce", func() { - cluster, err := env.GetCluster(namespace, clusterName) + It("can update the cluster by changing the owner of a tablespace", func() { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updateTablespaceOwner(cluster, "anothertablespace", "alpha") - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertTablespaceReconciled(namespace, clusterName, "anothertablespace", testTimeouts[testUtils.Short]) - AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") + Eventually(func() (bool, error) { + return hasTablespaceAndOwner(cluster, "anothertablespace", "alpha") + }).WithTimeout(30 * time.Second).Should(BeTrue()) }) It("can update the cluster to set a tablespace as temporary", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("setting the first tablespace as temporary", func() { @@ -163,12 +202,16 @@ }) It("can create the backup and verify content in the object store", func() { - backupName, err = env.GetResourceNameFromYAML(clusterBackupManifest) + backupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterBackupManifest) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("creating backup %s and verifying backup is ready", backupName), func() { - testUtils.ExecuteBackup(namespace, clusterBackupManifest, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, clusterBackupManifest, false, + testTimeouts[timeouts.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) }) By("verifying the number of tars in minio", func() { @@ -177,32 +220,18 @@ By("verifying backup status", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } - return cluster.Status.LastSuccessfulBackup, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastFailedBackup, err - }, 30).Should(BeEmpty()) }) }) It("can
update the cluster adding a new tablespace and backup again", func() { By("adding a new tablespace to the cluster", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) addTablespaces(cluster, []apiv1.TablespaceConfiguration{ @@ -218,26 +247,26 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }, }) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) }) By("verifying there are 3 tablespaces and PVCs were created", func() { - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.Spec.Tablespaces).To(HaveLen(3)) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 3, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") - AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") - AssertTablespaceAndOwnerExist(cluster, "thirdtablespace", "dante") + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 3, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + Expect(hasTablespaceAndOwner(cluster, "atablespace", "app")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "anothertablespace", "alpha")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "thirdtablespace", "dante")).To(BeTrue()) }) By("waiting for the cluster to be ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("verifying expected number of PVCs for tablespaces", func() { @@ -246,14 +275,15 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("creating a new backup and verifying backup is ready", func() { - backupCondition, err := testUtils.GetConditionsInClusterStatus( + backupCondition, err := backups.GetConditionsInClusterStatus( + env.Ctx, + env.Client, namespace, clusterName, - env, apiv1.ConditionBackup, ) Expect(err).ShouldNot(HaveOccurred()) - _, stderr, err := testUtils.Run( + _, stderr, err := run.Run( fmt.Sprintf("kubectl cnpg backup %s -n %s --backup-name %s", clusterName, namespace, fullBackupName)) Expect(stderr).To(BeEmpty()) @@ -266,10 +296,10 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, ) // TODO: this is to force a CHECKPOINT when we run the backup on standby. 
- // This should be better handled inside ExecuteBackup + // This should be better handled inside Execute AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - AssertBackupConditionInClusterStatus(namespace, clusterName) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) }) By("verifying the number of tars in the latest base backup", func() { @@ -282,25 +312,25 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, By("verifying backup status", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } - return cluster.Status.LastSuccessfulBackup, err + return cluster.Status.LastSuccessfulBackup, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } - return cluster.Status.LastFailedBackup, err + return cluster.Status.LastFailedBackup, err //nolint:staticcheck }, 30).Should(BeEmpty()) }) }) @@ -313,26 +343,26 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, const clusterRestoreFromBarmanManifest string = fixturesDir + "/tablespaces/restore-cluster-from-barman.yaml.template" - restoredClusterName, err := env.GetResourceNameFromYAML(clusterRestoreFromBarmanManifest) + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterRestoreFromBarmanManifest) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot", func() { CreateResourceFromFile(namespace, clusterRestoreFromBarmanManifest) // A delay of 5 min when restoring with tablespaces is normal, let's give extra time - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReadySlow], + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) }) By("verifying that tablespaces and PVC were created", func() { - restoredCluster, err := env.GetCluster(namespace, restoredClusterName) + restoredCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) AssertClusterHasMountPointsAndVolumesForTablespaces(restoredCluster, 3, - testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[testUtils.Short]) - AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") - AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") - AssertTablespaceAndOwnerExist(cluster, "thirdtablespace", "dante") + testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[timeouts.Short]) + Expect(hasTablespaceAndOwner(cluster, "atablespace", "app")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, 
"anothertablespace", "alpha")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "thirdtablespace", "dante")).To(BeTrue()) }) }) }) @@ -360,15 +390,24 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, tablespace2 = "tbs2" table2 = "test_tbs2" ) - checkPointTimeout := time.Second * 10 BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // We create the required credentials for MinIO - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) + }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, namespace) @@ -379,30 +418,32 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) It("can verify tablespaces and PVC were created", func() { - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.Short]) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.Short]) }) It("can create the volume snapshot backup declaratively and verify the backup", func() { - backupName, err = env.GetResourceNameFromYAML(clusterVolumesnapshoBackupManifest) + backupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterVolumesnapshoBackupManifest) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("creating backup %s and verifying backup is ready", backupName), func() { - backupObject = testUtils.ExecuteBackup( + backupObject = backups.Execute( + env.Ctx, + env.Client, + env.Scheme, namespace, clusterVolumesnapshoBackupManifest, false, - testTimeouts[testUtils.VolumeSnapshotIsReady], - env, + testTimeouts[timeouts.VolumeSnapshotIsReady], ) - AssertBackupConditionInClusterStatus(namespace, clusterName) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) }) By("checking that volumeSnapshots are properly labeled", func() { Eventually(func(g Gomega) { for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements { - volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name) + volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace, snapshot.Name) g.Expect(err).ToNot(HaveOccurred()) g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName)) g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name)) @@ -417,46 +458,56 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, By("inserting test data and creating WALs on the cluster to be snapshotted", func() { // Create a table and insert data 1,2 in each tablespace tl1 := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - TableName: table1, - Tablespace: tablespace1, + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: table1, + 
Tablespace: tablespace1, } - AssertCreateTestDataInTablespace(tl1, psqlClientPod) + AssertCreateTestData(env, tl1) tl2 := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - TableName: table2, - Tablespace: tablespace2, + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: table2, + Tablespace: tablespace2, } - AssertCreateTestDataInTablespace(tl2, psqlClientPod) + AssertCreateTestData(env, tl2) - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Execute a checkpoint - _, _, err = env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &checkPointTimeout, - "psql", "-U", "postgres", "-tAc", "CHECKPOINT") + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, postgres.PostgresDBName, + "CHECKPOINT", + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) }) - backupName = clusterName + utils.GetCurrentTimestampWithFormat("20060102150405") + backupName = clusterName + pgTime.GetCurrentTimestampWithFormat("20060102150405") By("creating a volumeSnapshot and waiting until it's completed", func() { - err := testUtils.CreateOnDemandBackupViaKubectlPlugin( - namespace, - clusterName, - backupName, - apiv1.BackupTargetStandby, - apiv1.BackupMethodVolumeSnapshot, - ) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return backups.CreateOnDemandBackupViaKubectlPlugin( + namespace, + clusterName, + backupName, + apiv1.BackupTargetStandby, + apiv1.BackupMethodVolumeSnapshot, + ) + }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) // TODO: this is to force a CHECKPOINT when we run the backup on standby. 
// This should probably be moved elsewhere AssertArchiveWalOnMinio(namespace, clusterName, clusterName) Eventually(func(g Gomega) { - backupList, err := env.GetBackupList(namespace) + backupList, err := backups.List(env.Ctx, env.Client, namespace) g.Expect(err).ToNot(HaveOccurred()) for _, backup := range backupList.Items { if backup.Name != backupName { @@ -468,13 +519,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, backup.Status.Error) g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(4)) } - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("checking that volumeSnapshots are properly labeled", func() { Eventually(func(g Gomega) { for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements { - volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name) + volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace, snapshot.Name) g.Expect(err).ToNot(HaveOccurred()) g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName)) g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name)) @@ -490,59 +541,89 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, err = os.Setenv("BACKUP_NAME", backupName) Expect(err).ToNot(HaveOccurred()) - clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterVolumesnapshoRestoreManifest) + clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, + clusterVolumesnapshoRestoreManifest) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot", func() { CreateResourceFromFile(namespace, clusterVolumesnapshoRestoreManifest) - AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow], + AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow], env) }) By("verifying that tablespaces and PVC were created", func() { - restoredCluster, err := env.GetCluster(namespace, clusterToRestoreName) + restoredCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, + clusterToRestoreName) Expect(err).ToNot(HaveOccurred()) AssertClusterHasMountPointsAndVolumesForTablespaces(restoredCluster, 2, - testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[testUtils.Short]) + testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[timeouts.Short]) }) By("verifying the correct data exists in the restored cluster", func() { - restoredPrimary, err := env.GetClusterPrimary(namespace, clusterToRestoreName) - Expect(err).ToNot(HaveOccurred()) - - AssertDataExpectedCount(namespace, clusterToRestoreName, table1, 2, restoredPrimary) - AssertDataExpectedCount(namespace, clusterToRestoreName, table2, 2, restoredPrimary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: postgres.AppDBName, + TableName: table1, + } + AssertDataExpectedCount(env, tableLocator, 2) + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: postgres.AppDBName, + TableName: table2, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) It(fmt.Sprintf("can create the 
cluster by recovery from volume snapshot backup with pitr %v", backupName), func() { By("inserting test data and creating WALs on the cluster to be snapshotted", func() { + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + Expect(err).ToNot(HaveOccurred()) + // Register the cleanup only after the error check, so a failed + // port-forward cannot leave nil handles deferred for Close + defer func() { + _ = conn.Close() + forward.Close() + }() + // Insert 2 more rows which we expect to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterName, table1, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, table1, 4, psqlClientPod) + insertRecordIntoTable(table1, 3, conn) + insertRecordIntoTable(table1, 4, conn) - insertRecordIntoTable(namespace, clusterName, table2, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, table2, 4, psqlClientPod) + insertRecordIntoTable(table2, 3, conn) + insertRecordIntoTable(table2, 4, conn) // Because GetCurrentTimestamp() rounds down to the second and is executed // right after the creation of the test data, we wait for 1s to avoid not // including the newly created data within the recovery_target_time time.Sleep(1 * time.Second) // Get the recovery_target_time and pass it to the template engine - recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env, psqlClientPod) + recoveryTargetTime, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) err = os.Setenv(recoveryTargetTimeEnv, recoveryTargetTime) Expect(err).ToNot(HaveOccurred()) // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterName, table1, 5, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, table1, 6, psqlClientPod) + insertRecordIntoTable(table1, 5, conn) + insertRecordIntoTable(table1, 6, conn) - insertRecordIntoTable(namespace, clusterName, table2, 5, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, table2, 6, psqlClientPod) + insertRecordIntoTable(table2, 5, conn) + insertRecordIntoTable(table2, 6, conn) // Close and archive the current WAL file AssertArchiveWalOnMinio(namespace, clusterName, clusterName) @@ -552,38 +633,48 @@ Expect(err).ToNot(HaveOccurred()) Expect(snapshotList.Items).To(HaveLen(len(backupObject.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, TablespaceSnapshotPrefix: snapshotTbsEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backupObject, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backupObject, envVars) Expect(err).ToNot(HaveOccurred()) }) - clusterToPITRName, err := env.GetResourceNameFromYAML(clusterVolumesnapshoPITRManifest) + clusterToPITRName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterVolumesnapshoPITRManifest) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot", func() { CreateResourceFromFile(namespace, clusterVolumesnapshoPITRManifest) - AssertClusterIsReady(namespace, clusterToPITRName, testTimeouts[testUtils.ClusterIsReadySlow], + AssertClusterIsReady(namespace, clusterToPITRName, testTimeouts[timeouts.ClusterIsReadySlow], env) }) By("can verify tablespaces and
PVC were created", func() { - recoveryCluster, err := env.GetCluster(namespace, clusterToPITRName) + recoveryCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterToPITRName) Expect(err).ToNot(HaveOccurred()) AssertClusterHasMountPointsAndVolumesForTablespaces(recoveryCluster, 2, - testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(recoveryCluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(recoveryCluster, testTimeouts[testUtils.Short]) + testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(recoveryCluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(recoveryCluster, testTimeouts[timeouts.Short]) }) By("verifying the correct data exists in the restored cluster", func() { - recoveryPrimary, err := env.GetClusterPrimary(namespace, clusterToPITRName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCount(namespace, clusterToPITRName, table1, 4, recoveryPrimary) - AssertDataExpectedCount(namespace, clusterToPITRName, table2, 4, recoveryPrimary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToPITRName, + DatabaseName: postgres.AppDBName, + TableName: table1, + } + AssertDataExpectedCount(env, tableLocator, 4) + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: clusterToPITRName, + DatabaseName: postgres.AppDBName, + TableName: table2, + } + AssertDataExpectedCount(env, tableLocator, 4) }) }) }) @@ -594,14 +685,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, BeforeAll(func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) clusterSetup(namespace, clusterManifest) }) It("can update cluster by adding tablespaces", func() { By("adding tablespaces to the spec and patching", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeFalse()) @@ -620,30 +711,26 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }, }) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) }) By("verify tablespaces and PVC were created", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) }) By("waiting for the cluster to be ready again", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + 
AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) }) - It("can hibernate via plugin a cluster with tablespaces", func() { - assertCanHibernateClusterWithTablespaces(namespace, clusterName, testUtils.HibernateImperatively, 2) - }) - It("can hibernate via annotation a cluster with tablespaces", func() { - assertCanHibernateClusterWithTablespaces(namespace, clusterName, testUtils.HibernateDeclaratively, 6) + assertCanHibernateClusterWithTablespaces(namespace, clusterName, hibernateDeclaratively, 6) }) It("can fence a cluster with tablespaces using the plugin", func() { @@ -652,13 +739,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("fencing the cluster", func() { - err := testUtils.FencingOn(env, "*", namespace, clusterName, testUtils.UsingPlugin) + err := fencing.On(env.Ctx, env.Client, "*", namespace, clusterName, fencing.UsingPlugin) Expect(err).ToNot(HaveOccurred()) }) By("check all instances become not ready", func() { Eventually(func() (bool, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) if err != nil { return false, err } @@ -677,13 +764,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("un-fencing the cluster", func() { - err := testUtils.FencingOff(env, "*", namespace, clusterName, testUtils.UsingPlugin) + err := fencing.Off(env.Ctx, env.Client, "*", namespace, clusterName, fencing.UsingPlugin) Expect(err).ToNot(HaveOccurred()) }) By("all instances become ready", func() { Eventually(func() (bool, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) if err != nil { return false, err } @@ -702,14 +789,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("verify tablespaces and PVC are there", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("verifying all PVCs for tablespaces are recreated", func() { @@ -724,14 +811,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, BeforeAll(func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) clusterSetup(namespace, clusterManifest) }) It("can update cluster adding tablespaces", func() { By("patch cluster with primaryUpdateMethod=switchover", func() { - cluster, err := 
env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeFalse()) @@ -741,10 +828,10 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) }) By("waiting for the cluster to be ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("adding tablespaces to the spec and patching", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeFalse()) @@ -766,21 +853,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) }) }) It("can verify tablespaces and PVC were created", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) }) }) @@ -804,25 +891,6 @@ func updateTablespaceOwner(cluster *apiv1.Cluster, tablespaceName, newOwner stri Expect(err).ToNot(HaveOccurred()) } -func AssertTablespaceReconciled( - namespace, clusterName, - tablespaceName string, - timeout int, -) { - By(fmt.Sprintf("checking if tablespace %v is in reconciled status", tablespaceName), func() { - Eventually(func(g Gomega) bool { - cluster, err := env.GetCluster(namespace, clusterName) - g.Expect(err).ToNot(HaveOccurred()) - for _, state := range cluster.Status.TablespacesStatus { - if state.State == apiv1.TablespaceStatusReconciled && state.Name == tablespaceName { - return true - } - } - return false - }, timeout).Should(BeTrue()) - }) -} - func AssertRoleReconciled( namespace, clusterName, roleName string, @@ -830,7 +898,7 @@ func AssertRoleReconciled( ) { By(fmt.Sprintf("checking if role %v is in reconciled status", roleName), func() { Eventually(func(g Gomega) bool { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for state, names := range cluster.Status.ManagedRolesStatus.ByStatus { if state == apiv1.RoleStatusReconciled { @@ -847,8 +915,8 @@ func 
AssertClusterHasMountPointsAndVolumesForTablespaces( numTablespaces int, timeout int, ) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name podMountPaths := func(pod corev1.Pod) (bool, []string) { var hasPostgresContainer bool var mountPaths []string @@ -867,7 +935,7 @@ func AssertClusterHasMountPointsAndVolumesForTablespaces( Eventually(func(g Gomega) { g.Expect(cluster.ContainsTablespaces()).To(BeTrue()) g.Expect(cluster.Spec.Tablespaces).To(HaveLen(numTablespaces)) - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { g.Expect(pod.Spec.Containers).ToNot(BeEmpty()) @@ -919,11 +987,11 @@ func getDatabasUserUID(cluster *apiv1.Cluster, dbContainer *corev1.Container) in } func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeout int) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name By("checking all the required PVCs were created", func() { Eventually(func(g Gomega) { - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) g.Expect(err).ShouldNot(HaveOccurred()) var tablespacePvcNames []string for _, pvc := range pvcList.Items { @@ -943,7 +1011,7 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo } } } - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { for _, tbsConfig := range cluster.Spec.Tablespaces { @@ -955,13 +1023,14 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo By("checking the data directory for the tablespaces is owned by postgres", func() { Eventually(func(g Gomega) { // minio may in the same namespace with cluster pod - pvcList, err := env.GetClusterPodList(namespace, clusterName) + pvcList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ShouldNot(HaveOccurred()) for _, pod := range pvcList.Items { for _, tbsConfig := range cluster.Spec.Tablespaces { dataDir := fmt.Sprintf("/var/lib/postgresql/tablespaces/%s/data", tbsConfig.Name) - owner, stdErr, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + owner, stdErr, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: pod.Name, }, nil, @@ -969,7 +1038,7 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo ) targetContainer := getPostgresContainer(pod) - Expect(targetContainer).NotTo(BeNil()) + g.Expect(targetContainer).NotTo(BeNil()) dbUser := getDatabasUserUID(cluster, targetContainer) g.Expect(stdErr).To(BeEmpty()) @@ -982,22 +1051,23 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo } func AssertDatabaseContainsTablespaces(cluster *apiv1.Cluster, timeout int) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name By("checking the expected tablespaces are in the database", func() { Eventually(func(g Gomega) { - instances, err := 
env.GetClusterPodList(namespace, clusterName) + instances, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ShouldNot(HaveOccurred()) var tbsListing string for _, instance := range instances.Items { var stdErr string var err error - tbsListing, stdErr, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{ + tbsListing, stdErr, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: instance.Name, - }, testUtils.DatabaseName("app"), - "SELECT oid, spcname, pg_get_userbyid(spcowner) FROM pg_tablespace;", + }, postgres.AppDBName, + "SELECT oid, spcname, pg_catalog.pg_get_userbyid(spcowner) FROM pg_catalog.pg_tablespace", ) g.Expect(stdErr).To(BeEmpty()) g.Expect(err).ShouldNot(HaveOccurred()) @@ -1011,20 +1081,21 @@ func AssertDatabaseContainsTablespaces(cluster *apiv1.Cluster, timeout int) { } func AssertTempTablespaceContent(cluster *apiv1.Cluster, timeout int, content string) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name By("checking the expected setting in a new PG session", func() { Eventually(func(g Gomega) { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { g.Expect(err).ShouldNot(HaveOccurred()) } - settingValue, stdErr, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ + settingValue, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, - }, testUtils.DatabaseName("app"), + }, postgres.AppDBName, "SHOW temp_tablespaces", ) g.Expect(stdErr).To(BeEmpty()) @@ -1036,23 +1107,24 @@ func AssertTempTablespaceContent(cluster *apiv1.Cluster, timeout int, content st } func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespaceName string) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { Expect(err).ShouldNot(HaveOccurred()) } By("checking the temporary table is created into the temporary tablespace", func() { - commandOutput, stdErr, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ + commandOutput, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, - }, testUtils.DatabaseName("app"), + }, postgres.AppDBName, "CREATE TEMPORARY TABLE cnp_e2e_test_table (i INTEGER); "+ - "SELECT spcname FROM pg_tablespace WHERE OID="+ - "(SELECT reltablespace FROM pg_class WHERE oid = 'cnp_e2e_test_table'::regclass)", + "SELECT spcname FROM pg_catalog.pg_tablespace WHERE OID="+ + "(SELECT reltablespace FROM pg_catalog.pg_class WHERE oid = 'cnp_e2e_test_table'::regclass)", ) Expect(stdErr).To(BeEmpty()) Expect(err).ShouldNot(HaveOccurred()) @@ -1062,30 +1134,10 @@ func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespace }) } -func AssertTablespaceAndOwnerExist(cluster *apiv1.Cluster, tablespace, owner string) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name - primaryPod, err := 
env.GetClusterPrimary(namespace, clusterName) - Expect(err).ShouldNot(HaveOccurred()) - result, stdErr, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ - Namespace: namespace, - PodName: primaryPod.Name, - }, testUtils.DatabaseName("app"), - fmt.Sprintf("SELECT 1 FROM pg_tablespace WHERE spcname = '%s' AND pg_get_userbyid(spcowner) = '%s';", - tablespace, - owner), - ) - Expect(stdErr).To(BeEmpty()) - Expect(err).ShouldNot(HaveOccurred()) - Expect(result).To(Equal("1\n")) - GinkgoWriter.Printf("Found Tablespaces %s with owner %s", tablespace, owner) -} - func assertCanHibernateClusterWithTablespaces( namespace string, clusterName string, - method testUtils.HibernationMethod, + method hibernationMethod, keptPVCs int, ) { By("verifying expected PVCs for tablespaces before hibernate", func() { @@ -1093,13 +1145,13 @@ func assertCanHibernateClusterWithTablespaces( }) By("hibernate the cluster", func() { - err := testUtils.HibernateOn(env, namespace, clusterName, method) + err := hibernateOn(env.Ctx, env.Client, namespace, clusterName, method) Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("verifying cluster %v pods are removed", clusterName), func() { Eventually(func(g Gomega) { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(podList.Items).Should(BeEmpty()) }, 300).Should(Succeed()) }) @@ -1109,22 +1161,22 @@ func assertCanHibernateClusterWithTablespaces( }) By("hibernate off the cluster", func() { - err := testUtils.HibernateOff(env, namespace, clusterName, method) + err := hibernateOff(env.Ctx, env.Client, namespace, clusterName, method) Expect(err).ToNot(HaveOccurred()) }) By("waiting for the cluster to be ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("verify tablespaces and PVC are there", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) }) By("verifying all PVCs for tablespaces are recreated", func() { @@ -1135,7 +1187,7 @@ func assertCanHibernateClusterWithTablespaces( func eventuallyHasExpectedNumberOfPVCs(pvcCount int, namespace string) { By(fmt.Sprintf("checking cluster eventually has %d PVCs for tablespaces", pvcCount)) Eventually(func(g Gomega) { - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) g.Expect(err).ShouldNot(HaveOccurred()) tbsPvc := 0 for _, pvc := range pvcList.Items { @@ -1146,12 +1198,12 @@ func eventuallyHasExpectedNumberOfPVCs(pvcCount int, namespace string) { tbsPvc++ } g.Expect(tbsPvc).Should(Equal(pvcCount)) - }, testTimeouts[testUtils.ClusterIsReady]).Should(Succeed()) + }, 
testTimeouts[timeouts.ClusterIsReady]).Should(Succeed()) } func eventuallyHasCompletedBackups(namespace string, numBackups int) { Eventually(func(g Gomega) { - backups, err := env.GetBackupList(namespace) + backups, err := backups.List(env.Ctx, env.Client, namespace) Expect(err).ShouldNot(HaveOccurred()) Expect(backups.Items).To(HaveLen(numBackups)) @@ -1174,7 +1226,7 @@ func latestBaseBackupContainsExpectedTars( // we list the backup.info files to get the listing of base backups // directories in minio backupInfoFiles := filepath.Join("*", clusterName, "base", "*", "*.info") - ls, err := testUtils.ListFilesOnMinio(minioEnv, backupInfoFiles) + ls, err := minio.ListFiles(minioEnv, backupInfoFiles) g.Expect(err).ShouldNot(HaveOccurred()) frags := strings.Split(ls, "\n") slices.Sort(frags) @@ -1182,10 +1234,10 @@ func latestBaseBackupContainsExpectedTars( g.Expect(frags).To(HaveLen(numBackups), report) latestBaseBackup := filepath.Dir(frags[numBackups-1]) tarsInLastBackup := strings.TrimPrefix(filepath.Join(latestBaseBackup, "*.tar"), "minio/") - listing, err := testUtils.ListFilesOnMinio(minioEnv, tarsInLastBackup) + listing, err := minio.ListFiles(minioEnv, tarsInLastBackup) g.Expect(err).ShouldNot(HaveOccurred()) report += fmt.Sprintf("tar listing:\n%s\n", listing) - numTars, err := testUtils.CountFilesOnMinio(minioEnv, tarsInLastBackup) + numTars, err := minio.CountFiles(minioEnv, tarsInLastBackup) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(numTars).To(Equal(expectedTars), report) }, 120).Should(Succeed()) @@ -1208,3 +1260,62 @@ func getSnapshots( return snapshotList, nil } + +type hibernationMethod string + +const ( + // hibernateDeclaratively it is a keyword to use while fencing on/off the instances using annotation method + hibernateDeclaratively hibernationMethod = "annotation" +) + +func hibernateOn( + ctx context.Context, + crudClient client.Client, + namespace, + clusterName string, + method hibernationMethod, +) error { + switch method { + case hibernateDeclaratively: + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return err + } + if cluster.Annotations == nil { + cluster.Annotations = make(map[string]string) + } + originCluster := cluster.DeepCopy() + cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOn + + err = crudClient.Patch(context.Background(), cluster, client.MergeFrom(originCluster)) + return err + default: + return fmt.Errorf("unknown method: %v", method) + } +} + +func hibernateOff( + ctx context.Context, + crudClient client.Client, + namespace, + clusterName string, + method hibernationMethod, +) error { + switch method { + case hibernateDeclaratively: + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return err + } + if cluster.Annotations == nil { + cluster.Annotations = make(map[string]string) + } + originCluster := cluster.DeepCopy() + cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOff + + err = crudClient.Patch(context.Background(), cluster, client.MergeFrom(originCluster)) + return err + default: + return fmt.Errorf("unknown method: %v", method) + } +} diff --git a/tests/e2e/tolerations_test.go b/tests/e2e/tolerations_test.go index c5862874cf..18f06215ae 100644 --- a/tests/e2e/tolerations_test.go +++ b/tests/e2e/tolerations_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
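
The hibernateOn/hibernateOff helpers added to tablespaces_test.go above differ only in the annotation value they write, and both accept a ctx that the Patch call then ignores in favor of context.Background(). A folded sketch that threads the caller's context — an assumption about the intent, not a change this diff makes:

```go
// Sketch: hibernateOn/hibernateOff collapsed into one helper. Unlike the
// added code, Patch receives the caller's ctx rather than context.Background().
func setHibernation(
	ctx context.Context,
	crudClient client.Client,
	namespace, clusterName string,
	value string, // hibernation.HibernationOn or hibernation.HibernationOff
) error {
	cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
	if err != nil {
		return err
	}
	origin := cluster.DeepCopy()
	if cluster.Annotations == nil {
		cluster.Annotations = make(map[string]string)
	}
	cluster.Annotations[utils.HibernationAnnotationName] = value
	// MergeFrom builds a merge patch against the original object, so only
	// the annotation change is sent to the API server.
	return crudClient.Patch(ctx, cluster, client.MergeFrom(origin))
}
```
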
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -20,7 +23,8 @@ import ( "fmt" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -48,7 +52,7 @@ var _ = Describe("E2E Tolerations Node", Serial, Label(tests.LabelDisruptive, te AfterEach(func() { for _, node := range taintedNodes { cmd := fmt.Sprintf("kubectl taint node %v %s=test:NoSchedule-", node, tolerationKey) - _, _, err := utils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } taintedNodes = nil @@ -57,16 +61,16 @@ var _ = Describe("E2E Tolerations Node", Serial, Label(tests.LabelDisruptive, te It("can create a cluster with tolerations", func() { var err error // Initialize empty global namespace variable - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("tainting all the nodes", func() { - nodes, _ := env.GetNodeList() + nodes, _ := nodes.List(env.Ctx, env.Client) // We taint all the nodes where we could run the workloads for _, node := range nodes.Items { if (node.Spec.Unschedulable != true) && (len(node.Spec.Taints) == 0) { cmd := fmt.Sprintf("kubectl taint node %v %s=test:NoSchedule", node.Name, tolerationKey) - _, _, err := utils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) taintedNodes = append(taintedNodes, node.Name) } diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go index 7cca6c938e..2b746bf90c 100644 --- a/tests/e2e/update_user_test.go +++ b/tests/e2e/update_user_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -27,7 +30,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -50,22 +58,25 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC It("can update the user application password", func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) + rwService := services.GetReadWriteServiceName(clusterName) appSecretName := clusterName + apiv1.ApplicationUserSecretSuffix superUserSecretName := clusterName + apiv1.SuperUserSecretSuffix + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + By("update user application password", func() { const newPassword = "eeh2Zahohx" //nolint:gosec + AssertUpdateSecret("password", newPassword, appSecretName, namespace, clusterName, 30, env) - AssertConnection(host, testsUtils.AppUser, testsUtils.AppDBName, newPassword, *psqlClientPod, 60, env) + AssertConnection(namespace, rwService, postgres.AppDBName, postgres.AppUser, newPassword, env) }) By("fail updating user application password with wrong user in secret", func() { @@ -76,25 +87,25 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC AssertUpdateSecret("username", newUser, appSecretName, namespace, clusterName, 30, env) timeout := time.Second * 10 - dsn := testsUtils.CreateDSN(host, newUser, testsUtils.AppDBName, newPassword, testsUtils.Require, 5432) + dsn := services.CreateDSN(rwService, newUser, postgres.AppDBName, newPassword, services.Require, 5432) - _, _, err := env.ExecCommand(env.Ctx, *psqlClientPod, + _, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, *primaryPod, specs.PostgresContainerName, &timeout, "psql", dsn, "-tAc", "SELECT 1") Expect(err).To(HaveOccurred()) // Revert the username change - AssertUpdateSecret("username", testsUtils.AppUser, appSecretName, namespace, clusterName, 30, env) + AssertUpdateSecret("username", postgres.AppUser, appSecretName, namespace, clusterName, 30, env) }) By("update superuser password", func() { // Setting EnableSuperuserAccess to true - Eventually(func() error { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).NotTo(HaveOccurred()) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).NotTo(HaveOccurred()) cluster.Spec.EnableSuperuserAccess = ptr.To(true) - return env.Client.Update(env.Ctx, cluster) - }, 60, 5).Should(Not(HaveOccurred())) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, 60, 5).Should(Succeed()) // We should now have a secret var secret corev1.Secret @@ -109,7 +120,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC const newPassword = "fi6uCae7" //nolint:gosec AssertUpdateSecret("password", newPassword, superUserSecretName, namespace, clusterName, 30, env) - AssertConnection(host, testsUtils.PostgresUser, testsUtils.PostgresDBName, newPassword, *psqlClientPod, 60, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, 
postgres.PostgresUser, newPassword, env) }) }) }) @@ -122,15 +133,23 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi ) var namespace string + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + It("enable and disable superuser access", func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) + rwService := services.GetReadWriteServiceName(clusterName) + secretName := clusterName + apiv1.SuperUserSecretSuffix var secret corev1.Secret namespacedName := types.NamespacedName{ @@ -138,6 +157,9 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi Name: secretName, } + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + By("ensure superuser access is disabled by default", func() { Eventually(func(g Gomega) { err = env.Client.Get(env.Ctx, namespacedName, &secret) @@ -145,15 +167,17 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) }, 200).Should(Succeed()) - pod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - timeout := time.Second * 10 - + query := "SELECT rolpassword IS NULL FROM pg_catalog.pg_authid WHERE rolname='postgres'" // We should have the `postgres` user with a null password Eventually(func() string { - stdout, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &timeout, - "psql", "-U", "postgres", "-tAc", - "SELECT rolpassword IS NULL FROM pg_authid WHERE rolname='postgres'") + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.PostgresDBName, + query) if err != nil { return "" } @@ -163,35 +187,35 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi By("enable superuser access", func() { // Setting EnableSuperuserAccess to true - Eventually(func() error { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).NotTo(HaveOccurred()) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).NotTo(HaveOccurred()) cluster.Spec.EnableSuperuserAccess = ptr.To(true) - return env.Client.Update(env.Ctx, cluster) - }, 60, 5).Should(Not(HaveOccurred())) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, 60, 5).Should(Succeed()) // We should now have a secret Eventually(func(g Gomega) { err = env.Client.Get(env.Ctx, namespacedName, &secret) g.Expect(err).ToNot(HaveOccurred()) - }, 90).WithPolling(time.Second).Should(Succeed()) + }, 90).Should(Succeed()) - host, err := testsUtils.GetHostName(namespace, clusterName, env) + superUser, superUserPass, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.SuperUserSecretSuffix, + ) Expect(err).ToNot(HaveOccurred()) - superUser, 
superUserPass, err := testsUtils.GetCredentials(clusterName, namespace, - apiv1.SuperUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - AssertConnection(host, superUser, testsUtils.PostgresDBName, superUserPass, *psqlClientPod, 60, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, superUser, superUserPass, env) }) By("disable superuser access", func() { // Setting EnableSuperuserAccess to false - Eventually(func() error { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).NotTo(HaveOccurred()) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).NotTo(HaveOccurred()) cluster.Spec.EnableSuperuserAccess = ptr.To(false) - return env.Client.Update(env.Ctx, cluster) - }, 60, 5).Should(Not(HaveOccurred())) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, 60, 5).Should(Succeed()) // We expect the secret to eventually be deleted Eventually(func(g Gomega) { diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index f7182daecd..7bd70acfa5 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -35,9 +38,17 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -120,14 +131,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // Since the 'cnpg-system' namespace is deleted after each spec is completed, // we should create it and then create the pull image secret - err := env.EnsureNamespace(operatorNamespace) + err := namespaces.EnsureNamespace(env.Ctx, env.Client, operatorNamespace) Expect(err).NotTo(HaveOccurred()) dockerServer := os.Getenv("DOCKER_SERVER") dockerUsername := os.Getenv("DOCKER_USERNAME") dockerPassword := os.Getenv("DOCKER_PASSWORD") if dockerServer != "" && dockerUsername != "" && dockerPassword != "" { - _, _, err := testsUtils.Run(fmt.Sprintf(`kubectl -n %v create secret docker-registry + _, _, err := run.Run(fmt.Sprintf(`kubectl -n %v create secret docker-registry cnpg-pull-secret --docker-server="%v" --docker-username="%v" @@ -146,11 +157,11 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // but a single scheduled backups during the check AssertScheduledBackupsAreScheduled := func(serverName string) { By("verifying scheduled backups are still happening", func() { - latestTar := minioPath(serverName, "data.tar.gz") - currentBackups, err := testsUtils.CountFilesOnMinio(minioEnv, latestTar) + latestTar := minio.GetFilePath(serverName, "data.tar.gz") + currentBackups, err := minio.CountFiles(minioEnv, latestTar) Expect(err).ToNot(HaveOccurred()) Eventually(func() (int, error) { - return testsUtils.CountFilesOnMinio(minioEnv, latestTar) + return minio.CountFiles(minioEnv, latestTar) }, 120).Should(BeNumerically(">", currentBackups)) }) } @@ -167,11 +178,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O } AssertConfUpgrade := func(clusterName, upgradeNamespace string) { + databaseName := "appdb" + By("checking basic functionality performing a configuration upgrade on the cluster", func() { - podList, err := env.GetClusterPodList(upgradeNamespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Gather current primary - cluster, err := env.GetCluster(upgradeNamespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, upgradeNamespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary @@ -186,12 +199,17 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O }, 60).ShouldNot(HaveOccurred()) timeout := 300 - commandTimeout := time.Second * 10 // Check that both parameters have been modified in each pod for _, pod := range podList.Items { Eventually(func() (int, error) { - stdout, stderr, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_replication_slots") + stdout, stderr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show max_replication_slots") if err != nil { return 0, err } @@ -204,41 +222,54 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O "Pod %v should have updated its config", pod.Name) Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show maintenance_work_mem") + stdout, _, 
err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + postgres.PostgresDBName, + "show maintenance_work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(256), "Pod %v should have updated its config", pod.Name) } // Check that a switchover happened - Eventually(func() (bool, error) { - c, err := env.GetCluster(upgradeNamespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) bool { + c, err := clusterutils.Get(env.Ctx, env.Client, upgradeNamespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) GinkgoWriter.Printf("Current Primary: %s, Current Primary timestamp: %s\n", c.Status.CurrentPrimary, c.Status.CurrentPrimaryTimestamp) if c.Status.CurrentPrimary != oldPrimary { - return true, nil + return true } else if c.Status.CurrentPrimaryTimestamp != oldPrimaryTimestamp { - return true, nil + return true } - return false, nil + return false }, timeout, "1s").Should(BeTrue()) }) By("verifying that all the standbys streams from the primary", func() { // To check this we find the primary and create a table on it. // The table should be replicated on the standbys. - primary, err := env.GetClusterPrimary(upgradeNamespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, upgradeNamespace, clusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 query := "CREATE TABLE IF NOT EXISTS postswitch(i int);" - _, _, err = env.EventuallyExecCommand(env.Ctx, *primary, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "appdb", "-tAc", query) + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primary.Namespace, + PodName: primary.Name, + }, exec.DatabaseName(databaseName), + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) for i := 1; i < 4; i++ { @@ -252,8 +283,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O if err := env.Client.Get(env.Ctx, podNamespacedName, pod); err != nil { return "", err } - out, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "appdb", "-tAc", + + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + exec.DatabaseName(databaseName), "SELECT count(*) = 0 FROM postswitch") return strings.TrimSpace(out), err }, 240).Should(BeEquivalentTo("t"), @@ -264,12 +301,12 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // getExecutableHashesFromInstances prints the manager's executable hash of each pod to a given IO writer getExecutableHashesFromInstances := func(upgradeNamespace, clusterName string, w io.Writer) error { - pods, err := env.GetClusterPodList(upgradeNamespace, clusterName) + pods, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName) if err != nil { return err } for _, pod := range pods.Items { - status, err := testsUtils.RetrievePgStatusFromInstance(env, pod, true) + status, err := proxy.RetrievePgStatusFromInstance(env.Ctx, env.Interface, pod, true) if err != nil { continue } @@ -333,7 +370,9 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // 
assertExpectedMatchingPodUIDs checks that the UID of each pod of a Cluster matches with a given list of UIDs. // expectedMatches defines how many times, when comparing the elements of the 2 lists, you are expected to have // common values - assertExpectedMatchingPodUIDs := func(namespace, clusterName string, podUIDs []types.UID, expectedMatches int) error { + assertExpectedMatchingPodUIDs := func( + namespace, clusterName string, podUIDs []types.UID, expectedMatches int, + ) error { backoffCheckingPodRestarts := wait.Backoff{ Duration: 10 * time.Second, Steps: 30, @@ -343,7 +382,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O } err := retry.OnError(backoffCheckingPodRestarts, shouldRetry, func() error { var currentUIDs []types.UID - currentPodList, err := env.GetClusterPodList(namespace, clusterName) + currentPodList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) if err != nil { return err } @@ -364,23 +403,28 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O GinkgoWriter.Println("cleaning up") if CurrentSpecReport().Failed() { // Dump the minio namespace when failed - env.DumpNamespaceObjects(minioEnv.Namespace, "out/"+CurrentSpecReport().LeafNodeText+"minio.log") + namespaces.DumpNamespaceObjects( + env.Ctx, env.Client, + minioEnv.Namespace, "out/"+CurrentSpecReport().LeafNodeText+"minio.log", + ) // Dump the operator namespace, as operator is changing too - env.DumpOperator(operatorNamespace, + operator.Dump( + env.Ctx, env.Client, + operatorNamespace, "out/"+CurrentSpecReport().LeafNodeText+"operator.log") } // Delete the operator's namespace in case that the previous test make corrupted changes to // the operator's namespace so that affects subsequent test - if err := env.DeleteNamespaceAndWait(operatorNamespace, 60); err != nil { + if err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, operatorNamespace, 60); err != nil { return fmt.Errorf("could not cleanup, failed to delete operator namespace: %v", err) } - if _, err := testsUtils.CleanFilesOnMinio(minioEnv, minioPath1); err != nil { + if _, err := minio.CleanFiles(minioEnv, minioPath1); err != nil { return fmt.Errorf("encountered an error while cleaning up minio: %v", err) } - if _, err := testsUtils.CleanFilesOnMinio(minioEnv, minioPath2); err != nil { + if _, err := minio.CleanFiles(minioEnv, minioPath2); err != nil { return fmt.Errorf("encountered an error while cleaning up minio: %v", err) } @@ -395,7 +439,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O namespacePrefix), func() { var err error // Create a upgradeNamespace for all the resources - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating a upgradeNamespace should be quick @@ -416,18 +460,18 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O deployOperator := func(operatorManifestFile string) { By(fmt.Sprintf("applying manager manifest %s", operatorManifestFile), func() { // Upgrade to the new version - _, stderr, err := testsUtils.Run( + _, stderr, err := run.Run( fmt.Sprintf("kubectl apply --server-side --force-conflicts -f %v", operatorManifestFile)) Expect(err).NotTo(HaveOccurred(), "stderr: "+stderr) }) By("waiting for the deployment to be rolled out", func() { - deployment, err := env.GetOperatorDeployment() + deployment, err := 
operator.GetDeployment(env.Ctx, env.Client) Expect(err).NotTo(HaveOccurred()) timeout := 240 Eventually(func() error { - _, stderr, err := testsUtils.Run(fmt.Sprintf( + _, stderr, err := run.Run(fmt.Sprintf( "kubectl -n %v rollout status deployment %v -w --timeout=%vs", operatorNamespace, deployment.Name, @@ -441,13 +485,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O }, timeout).ShouldNot(HaveOccurred()) }) By("getting the operator info", func() { - pod, err := env.GetOperatorPod() + pod, err := operator.GetPod(env.Ctx, env.Client) Expect(err).NotTo(HaveOccurred()) GinkgoWriter.Println("image used for operator", pod.Spec.Containers[0].Image) }) } - assertClustersWorkAfterOperatorUpgrade := func(upgradeNamespace, operatorManifest string) { + assertClustersWorkAfterOperatorUpgrade := func(upgradeNamespace, operatorManifest string, online bool) { + databaseName := "appdb" // generate random serverNames for the clusters each time serverName1 := fmt.Sprintf("%s-%d", clusterName1, funk.RandomInt(0, 9999)) serverName2 := fmt.Sprintf("%s-%d", clusterName2, funk.RandomInt(0, 9999)) @@ -456,7 +501,15 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O CreateResourceFromFile(upgradeNamespace, pgSecrets) }) By("creating the cloud storage credentials", func() { - AssertStorageCredentialsAreCreated(upgradeNamespace, "aws-creds", "minio", "minio123") + _, err := secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + upgradeNamespace, + "aws-creds", + "minio", + "minio123", + ) + Expect(err).NotTo(HaveOccurred()) }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, upgradeNamespace) @@ -470,11 +523,30 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O err := os.Setenv("SERVER_NAME", serverName1) Expect(err).ToNot(HaveOccurred()) CreateResourceFromFile(upgradeNamespace, sampleFile) + + if online { + // Upgrading to the new release will trigger a + // rollout of Pods even if online upgrade is + // enabled. This happens because of the + // following PR: + // https://github.com/cloudnative-pg/cloudnative-pg/pull/5503 + // + // This E2e would correctly detect that and trigger a failure. + // To avoid this, just for this release, we disable the pod + // spec reconciliation. + // By doing that we don't test that the online upgrade won't + // trigger any Pod restart. We still test that the operator + // is upgraded in this case too. 
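
The run.Run call that follows implements the workaround described in the comment above by annotating the Cluster with cnpg.io/reconcilePodSpec=disabled. The property the workaround temporarily weakens — an online upgrade must not roll the instance pods — is asserted further down in the spec, roughly as in this sketch; the RestartCount assertion stands in for lines the hunk elides.

```go
// Sketch: what "online upgrade must not restart instances" boils down to —
// the pods recorded before the upgrade are still there, with no container
// restarts. The RestartCount check is assumed; the diff elides those lines.
podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName1)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
	Expect(pod.Status.ContainerStatuses).NotTo(BeEmpty())
	for _, containerStatus := range pod.Status.ContainerStatuses {
		Expect(containerStatus.RestartCount).To(BeZero())
	}
}
```
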
+ _, stderr, err := run.Run( + fmt.Sprintf("kubectl annotate -n %s cluster/%s cnpg.io/reconcilePodSpec=disabled", + upgradeNamespace, clusterName1)) + Expect(err).NotTo(HaveOccurred(), "stderr: "+stderr) + } }) // Cluster ready happens after minio is ready By("having a Cluster with three instances ready", func() { - AssertClusterIsReady(upgradeNamespace, clusterName1, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(upgradeNamespace, clusterName1, testTimeouts[timeouts.ClusterIsReady], env) }) By("creating a Pooler with two instances", func() { @@ -484,13 +556,20 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // Now that everything is in place, we add a bit of data we'll use to // check if the backup is working By("creating data on the database", func() { - primary, err := env.GetClusterPrimary(upgradeNamespace, clusterName1) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, upgradeNamespace, clusterName1) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 query := "CREATE TABLE IF NOT EXISTS to_restore AS VALUES (1),(2);" - _, _, err = env.EventuallyExecCommand(env.Ctx, *primary, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "appdb", "-tAc", query) + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primary.Namespace, + PodName: primary.Name, + }, exec.DatabaseName(databaseName), + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) }) @@ -514,8 +593,9 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // A file called data.tar.gz should be available on minio Eventually(func() (int, error, error) { - out, _, err := env.ExecCommandInContainer( - testsUtils.ContainerLocator{ + out, _, err := exec.CommandInContainer( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.ContainerLocator{ Namespace: minioEnv.Namespace, PodName: minioEnv.Client.Name, ContainerName: "mc", @@ -535,7 +615,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O assertPGBouncerPodsAreReady(upgradeNamespace, pgBouncerSampleFile, 2) var podUIDs []types.UID - podList, err := env.GetClusterPodList(upgradeNamespace, clusterName1) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName1) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podUIDs = append(podUIDs, pod.GetUID()) @@ -584,7 +664,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // the instance pods should not restart By("verifying that the instance pods are not restarted", func() { - podList, err := env.GetClusterPodList(upgradeNamespace, clusterName1) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName1) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { Expect(pod.Status.ContainerStatuses).NotTo(BeEmpty()) @@ -599,7 +679,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O err := os.Setenv("SERVER_NAME", serverName2) Expect(err).ToNot(HaveOccurred()) CreateResourceFromFile(upgradeNamespace, sampleFile2) - AssertClusterIsReady(upgradeNamespace, clusterName2, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(upgradeNamespace, clusterName2, testTimeouts[timeouts.ClusterIsReady], env) }) AssertConfUpgrade(clusterName2, upgradeNamespace) @@ -609,16 +689,18 @@ 
var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O By("restoring the backup taken from the first Cluster in a new cluster", func() { restoredClusterName := "cluster-restore" CreateResourceFromFile(upgradeNamespace, restoreFile) - AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], + env) // Test data should be present on restored primary primary := restoredClusterName + "-1" - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName("appdb"), + exec.DatabaseName(databaseName), "SELECT count(*) FROM to_restore") Expect(strings.Trim(out, "\n"), err).To(BeEquivalentTo("2")) @@ -626,12 +708,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // we expect a promotion. We can't enforce "2" because the timeline // ID will also depend on the history files existing in the cloud // storage and we don't know the status of that. - out, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName("appdb"), + exec.DatabaseName(databaseName), "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)") Expect(err).NotTo(HaveOccurred()) Expect(strconv.Atoi(strings.Trim(out, "\n"))).To( @@ -639,13 +722,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // Restored standbys should soon attach themselves to restored primary Eventually(func() (string, error) { - out, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName("appdb"), - "SELECT count(*) FROM pg_stat_replication") + exec.DatabaseName(databaseName), + "SELECT count(*) FROM pg_catalog.pg_stat_replication") return strings.Trim(out, "\n"), err }, 180).Should(BeEquivalentTo("2")) }) @@ -698,34 +782,34 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O It("keeps clusters working after a rolling upgrade", func() { upgradeNamespacePrefix := rollingUpgradeNamespace By("applying environment changes for current upgrade to be performed", func() { - testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, false, env) + operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, false) }) - mostRecentTag, err := testsUtils.GetMostRecentReleaseTag("../../releases") + mostRecentTag, err := operator.GetMostRecentReleaseTag("../../releases") Expect(err).NotTo(HaveOccurred()) GinkgoWriter.Printf("installing the recent CNPG tag %s\n", mostRecentTag) - testsUtils.InstallLatestCNPGOperator(mostRecentTag, env) + operator.InstallLatest(env.Client, mostRecentTag) DeferCleanup(cleanupOperatorAndMinio) upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix) - assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, currentOperatorManifest) + assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, currentOperatorManifest, false) }) 
It("keeps clusters working after an online upgrade", func() { upgradeNamespacePrefix := onlineUpgradeNamespace By("applying environment changes for current upgrade to be performed", func() { - testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, true, env) + operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, true) }) - mostRecentTag, err := testsUtils.GetMostRecentReleaseTag("../../releases") + mostRecentTag, err := operator.GetMostRecentReleaseTag("../../releases") Expect(err).NotTo(HaveOccurred()) GinkgoWriter.Printf("installing the recent CNPG tag %s\n", mostRecentTag) - testsUtils.InstallLatestCNPGOperator(mostRecentTag, env) + operator.InstallLatest(env.Client, mostRecentTag) DeferCleanup(cleanupOperatorAndMinio) upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix) - assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, currentOperatorManifest) + assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, currentOperatorManifest, true) }) }) @@ -738,7 +822,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O It("keeps clusters working after an online upgrade", func() { upgradeNamespacePrefix := onlineUpgradeNamespace By("applying environment changes for current upgrade to be performed", func() { - testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, true, env) + operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, true) }) GinkgoWriter.Printf("installing the current operator %s\n", currentOperatorManifest) @@ -746,20 +830,20 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O DeferCleanup(cleanupOperatorAndMinio) upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix) - assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, primeOperatorManifest) + assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, primeOperatorManifest, true) }) It("keeps clusters working after a rolling upgrade", func() { upgradeNamespacePrefix := rollingUpgradeNamespace By("applying environment changes for current upgrade to be performed", func() { - testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, false, env) + operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, false) }) GinkgoWriter.Printf("installing the current operator %s\n", currentOperatorManifest) deployOperator(currentOperatorManifest) DeferCleanup(cleanupOperatorAndMinio) upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix) - assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, primeOperatorManifest) + assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, primeOperatorManifest, false) }) }) }) diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index b65ff80fbe..7ee4f33fae 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -20,18 +23,29 @@ import ( "encoding/json" "fmt" "os" + "strconv" "strings" "time" volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" k8client "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -59,6 +73,18 @@ var _ = Describe("Verify Volume Snapshot", return snapshotList, nil } + updateClusterSnapshotClass := func(namespace, clusterName, className string) { + cluster := &apiv1.Cluster{} + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var err error + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + cluster.Spec.Backup.VolumeSnapshot.ClassName = className + return env.Client.Update(env.Ctx, cluster) + }) + Expect(err).ToNot(HaveOccurred()) + } + var namespace string Context("using the kubectl cnpg plugin", Ordered, func() { @@ -74,11 +100,11 @@ var _ = Describe("Verify Volume Snapshot", Skip("Test depth is lower than the amount requested for this test") } var err error - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Initializing namespace variable to be used in test case - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating a cluster with three nodes @@ -88,19 +114,20 @@ var _ = Describe("Verify Volume Snapshot", It("can create a Volume Snapshot", func() { var backupObject apiv1.Backup By("creating a volumeSnapshot and waiting until it's completed", func() { - err := testUtils.CreateOnDemandBackupViaKubectlPlugin( - namespace, - clusterName, - "", - apiv1.BackupTargetStandby, - apiv1.BackupMethodVolumeSnapshot, - ) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return backups.CreateOnDemandBackupViaKubectlPlugin( + namespace, + clusterName, + "", + apiv1.BackupTargetStandby, + apiv1.BackupMethodVolumeSnapshot, + ) + }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) // trigger a checkpoint as the backup may run on standby CheckPointAndSwitchWalOnPrimary(namespace, clusterName) Eventually(func(g Gomega) { - backupList, err := env.GetBackupList(namespace) + backupList, err := backups.List(env.Ctx, env.Client, namespace) g.Expect(err).ToNot(HaveOccurred()) for _, backup := range 
backupList.Items { if !strings.Contains(backup.Name, clusterName) { @@ -112,13 +139,14 @@ var _ = Describe("Verify Volume Snapshot", backup.Status.Error) g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) } - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("checking that volumeSnapshots are properly labeled", func() { Eventually(func(g Gomega) { for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements { - volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name) + volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace, + snapshot.Name) g.Expect(err).ToNot(HaveOccurred()) g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName)) g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name)) @@ -156,10 +184,10 @@ var _ = Describe("Verify Volume Snapshot", } var err error - clusterToSnapshotName, err = env.GetResourceNameFromYAML(clusterToSnapshot) + clusterToSnapshotName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToSnapshot) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("create the certificates for MinIO", func() { @@ -167,7 +195,15 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) }) - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) }) It("correctly executes PITR with a cold snapshot", func() { @@ -187,11 +223,13 @@ var _ = Describe("Verify Volume Snapshot", }) By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( + namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", + minioEnv.ServiceName) if err != nil { return false, err } @@ -203,13 +241,15 @@ var _ = Describe("Verify Volume Snapshot", By("creating a snapshot and waiting until it's completed", func() { var err error backupName := fmt.Sprintf("%s-example", clusterToSnapshotName) - backup, err = testUtils.CreateOnDemandBackup( + backup, err = backups.CreateOnDemand( + env.Ctx, + env.Client, namespace, clusterToSnapshotName, backupName, apiv1.BackupTargetStandby, apiv1.BackupMethodVolumeSnapshot, - env) + ) Expect(err).ToNot(HaveOccurred()) // trigger a checkpoint CheckPointAndSwitchWalOnPrimary(namespace, clusterToSnapshotName) @@ -224,7 +264,7 @@ var _ = Describe("Verify Volume Snapshot", "Backup should be completed correctly, error message is '%s'", backup.Status.Error) g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + }, 
testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("fetching the volume snapshots", func() { @@ -232,48 +272,77 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) Expect(err).ToNot(HaveOccurred()) }) By("inserting test data and creating WALs on the cluster to be snapshotted", func() { // Create a "test" table with values 1,2 - AssertCreateTestData(namespace, clusterToSnapshotName, tableName, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToSnapshotName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) // Because GetCurrentTimestamp() rounds down to the second and is executed // right after the creation of the test data, we wait for 1s to avoid not // including the newly created data within the recovery_target_time time.Sleep(1 * time.Second) // Get the recovery_target_time and pass it to the template engine - recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterToSnapshotName, env, psqlClientPod) + recoveryTargetTime, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterToSnapshotName, + ) Expect(err).ToNot(HaveOccurred()) err = os.Setenv(recoveryTargetTimeEnv, recoveryTargetTime) Expect(err).ToNot(HaveOccurred()) + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterToSnapshotName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 4, psqlClientPod) + insertRecordIntoTable(tableName, 3, conn) + insertRecordIntoTable(tableName, 4, conn) // Close and archive the current WAL file AssertArchiveWalOnMinio(namespace, clusterToSnapshotName, clusterToSnapshotName) }) - clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterSnapshotRestoreFile) + clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterSnapshotRestoreFile) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot and PITR", func() { AssertCreateCluster(namespace, clusterToRestoreName, clusterSnapshotRestoreFile, env) - AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow], + env) }) By("verifying the correct data exists in the restored cluster", func() { - restoredPrimary, err := env.GetClusterPrimary(namespace, clusterToRestoreName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCount(namespace, clusterToRestoreName, tableName, 2, restoredPrimary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: postgres.AppDBName, + 
TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) @@ -340,26 +409,33 @@ var _ = Describe("Verify Volume Snapshot", } var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) DeferCleanup(func() { _ = os.Unsetenv(snapshotDataEnv) _ = os.Unsetenv(snapshotWalEnv) }) - clusterToBackupName, err = env.GetResourceNameFromYAML(clusterToBackupFilePath) + clusterToBackupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToBackupFilePath) Expect(err).ToNot(HaveOccurred()) By("creating the cluster on which to execute the backup", func() { AssertCreateCluster(namespace, clusterToBackupName, clusterToBackupFilePath, env) + AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadySlow], env) }) }) It("can create a declarative cold backup and restoring using it", func() { By("inserting test data", func() { - AssertCreateTestData(namespace, clusterToBackupName, tableName, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToBackupName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) }) - backupName, err := env.GetResourceNameFromYAML(backupFileFilePath) + backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupFileFilePath) Expect(err).ToNot(HaveOccurred()) By("executing the backup", func() { @@ -370,13 +446,14 @@ var _ = Describe("Verify Volume Snapshot", var backup apiv1.Backup By("waiting the backup to complete", func() { Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup) + err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, + &backup) g.Expect(err).ToNot(HaveOccurred()) g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted), "Backup should be completed correctly, error message is '%s'", backup.Status.Error) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -391,31 +468,42 @@ var _ = Describe("Verify Volume Snapshot", var clusterToBackup *apiv1.Cluster By("fetching the created cluster", func() { - clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName) + clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName) Expect(err).ToNot(HaveOccurred()) }) snapshotList := getAndVerifySnapshots(clusterToBackup, backup) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, &backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, &backup, envVars) Expect(err).ToNot(HaveOccurred()) - clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterToRestoreFilePath) + clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterToRestoreFilePath) Expect(err).ToNot(HaveOccurred()) By("executing the restore", func() { CreateResourceFromFile(namespace, clusterToRestoreFilePath) + 
AssertClusterIsReady(namespace, + clusterToRestoreName, + testTimeouts[timeouts.ClusterIsReady], + env, + ) }) By("checking that the data is present on the restored cluster", func() { - AssertDataExpectedCount(namespace, clusterToRestoreName, tableName, 2, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) It("can take a snapshot targeting the primary", func() { - backupName, err := env.GetResourceNameFromYAML(backupPrimaryFilePath) + backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupPrimaryFilePath) Expect(err).ToNot(HaveOccurred()) By("executing the backup", func() { @@ -426,14 +514,15 @@ var _ = Describe("Verify Volume Snapshot", var backup apiv1.Backup By("waiting the backup to complete", func() { Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup) + err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, + &backup) g.Expect(err).ToNot(HaveOccurred()) g.Expect(backup.Status.Phase).To( BeEquivalentTo(apiv1.BackupPhaseCompleted), "Backup should be completed correctly, error message is '%s'", backup.Status.Error) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -448,20 +537,21 @@ var _ = Describe("Verify Volume Snapshot", var clusterToBackup *apiv1.Cluster By("fetching the created cluster", func() { - clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName) + clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName) Expect(err).ToNot(HaveOccurred()) }) _ = getAndVerifySnapshots(clusterToBackup, backup) By("ensuring cluster resumes after snapshot", func() { - AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadyQuick], + env) }) }) It("can take a snapshot in a single instance cluster", func() { By("scaling down the cluster to a single instance", func() { - cluster, err := env.GetCluster(namespace, clusterToBackupName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -472,21 +562,23 @@ var _ = Describe("Verify Volume Snapshot", By("ensuring there is only one pod", func() { Eventually(func(g Gomega) { - pods, err := env.GetClusterPodList(namespace, clusterToBackupName) + pods, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterToBackupName) g.Expect(err).NotTo(HaveOccurred()) g.Expect(pods.Items).To(HaveLen(1)) - }, testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed()) + }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed()) }) backupName := "single-instance-snap" By("taking a backup snapshot", func() { - _, err := testUtils.CreateOnDemandBackup( + _, err := backups.CreateOnDemand( + env.Ctx, + env.Client, namespace, clusterToBackupName, backupName, apiv1.BackupTargetStandby, apiv1.BackupMethodVolumeSnapshot, - env) + ) 
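The hunk above is a representative instance of the refactor that runs through this whole file: test helpers no longer receive the monolithic env value, but take the context and client explicitly. A minimal sketch of the resulting call shape, mirroring the call just above (the surrounding test wiring and imports are assumed):

    // Old style: the helper reached into a shared *TestingEnvironment.
    //   backup, err := testUtils.CreateOnDemandBackup(ns, cluster, name, target, method, env)
    // New style: context and client are injected, which keeps the scoped
    // helper packages (backups, clusterutils, minio, secrets, yaml, ...)
    // decoupled from the TestingEnvironment type.
    backup, err := backups.CreateOnDemand(
        env.Ctx,    // context.Context carried by the test environment
        env.Client, // controller-runtime client.Client
        namespace,
        clusterToBackupName,
        backupName,
        apiv1.BackupTargetStandby,
        apiv1.BackupMethodVolumeSnapshot,
    )
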
Expect(err).NotTo(HaveOccurred()) }) @@ -494,13 +586,14 @@ var _ = Describe("Verify Volume Snapshot", var backup apiv1.Backup By("waiting the backup to complete", func() { Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup) + err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, + &backup) g.Expect(err).ToNot(HaveOccurred()) g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted), "Backup should be completed correctly, error message is '%s'", backup.Status.Error) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -515,14 +608,15 @@ var _ = Describe("Verify Volume Snapshot", var clusterToBackup *apiv1.Cluster By("fetching the created cluster", func() { var err error - clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName) + clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName) Expect(err).ToNot(HaveOccurred()) }) _ = getAndVerifySnapshots(clusterToBackup, backup) By("ensuring cluster resumes after snapshot", func() { - AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadyQuick], + env) }) }) }) @@ -544,17 +638,17 @@ var _ = Describe("Verify Volume Snapshot", ) var clusterToSnapshotName string - var backup *apiv1.Backup + var backupTaken *apiv1.Backup BeforeAll(func() { if testLevelEnv.Depth < int(level) { Skip("Test depth is lower than the amount requested for this test") } var err error - clusterToSnapshotName, err = env.GetResourceNameFromYAML(clusterToSnapshot) + clusterToSnapshotName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToSnapshot) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("create the certificates for MinIO", func() { @@ -562,18 +656,30 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) }) - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) + }) By("creating the cluster to snapshot", func() { AssertCreateCluster(namespace, clusterToSnapshotName, clusterToSnapshot, env) }) By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + connectionStatus, err := 
minio.TestConnectivityUsingBarmanCloudWalArchive( + namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", + minioEnv.ServiceName) if err != nil { return false, err } @@ -592,12 +698,33 @@ var _ = Describe("Verify Volume Snapshot", }) By("inserting test data and creating WALs on the cluster to be snapshotted", func() { + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterToSnapshotName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) // Create a "test" table with values 1,2 - AssertCreateTestData(namespace, clusterToSnapshotName, tableName, psqlClientPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToSnapshotName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 4, psqlClientPod) + insertRecordIntoTable(tableName, 3, conn) + insertRecordIntoTable(tableName, 4, conn) // Close and archive the current WAL file AssertArchiveWalOnMinio(namespace, clusterToSnapshotName, clusterToSnapshotName) @@ -606,7 +733,8 @@ var _ = Describe("Verify Volume Snapshot", By("creating a snapshot and waiting until it's completed", func() { var err error backupName := fmt.Sprintf("%s-online", clusterToSnapshotName) - backup, err = testUtils.CreateBackup( + backupTaken, err = backups.Create( + env.Ctx, env.Client, apiv1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -618,7 +746,6 @@ var _ = Describe("Verify Volume Snapshot", Cluster: apiv1.LocalObjectReference{Name: clusterToSnapshotName}, }, }, - env, ) Expect(err).ToNot(HaveOccurred()) @@ -626,41 +753,46 @@ var _ = Describe("Verify Volume Snapshot", err = env.Client.Get(env.Ctx, types.NamespacedName{ Namespace: namespace, Name: backupName, - }, backup) + }, backupTaken) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted), + g.Expect(backupTaken.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted), "Backup should be completed correctly, error message is '%s'", - backup.Status.Error) - g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) - g.Expect(backup.Status.BackupLabelFile).ToNot(BeEmpty()) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + backupTaken.Status.Error) + g.Expect(backupTaken.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) + g.Expect(backupTaken.Status.BackupLabelFile).ToNot(BeEmpty()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("fetching the volume snapshots", func() { - snapshotList, err := getSnapshots(backup.Name, clusterToSnapshotName, namespace) + snapshotList, err := getSnapshots(backupTaken.Name, clusterToSnapshotName, namespace) Expect(err).ToNot(HaveOccurred()) - Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements))) + Expect(snapshotList.Items).To(HaveLen(len(backupTaken.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = 
testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backupTaken, envVars) Expect(err).ToNot(HaveOccurred()) }) - clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterSnapshotRestoreFile) + clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterSnapshotRestoreFile) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot and PITR", func() { AssertCreateCluster(namespace, clusterToRestoreName, clusterSnapshotRestoreFile, env) - AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow], + env) }) By("verifying the correct data exists in the restored cluster", func() { - restoredPrimary, err := env.GetClusterPrimary(namespace, clusterToRestoreName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCount(namespace, clusterToRestoreName, tableName, 4, restoredPrimary) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 4) }) }) @@ -668,9 +800,24 @@ var _ = Describe("Verify Volume Snapshot", // insert some data after the snapshot is taken, we want to verify the data exists in // the new pod when cluster scaled up By("inserting more test data and creating WALs on the cluster snapshotted", func() { + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, + clusterToSnapshotName, + postgres.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 5, psqlClientPod) - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 6, psqlClientPod) + insertRecordIntoTable(tableName, 5, conn) + insertRecordIntoTable(tableName, 6, conn) // Close and archive the current WAL file AssertArchiveWalOnMinio(namespace, clusterToSnapshotName, clusterToSnapshotName) @@ -678,35 +825,114 @@ var _ = Describe("Verify Volume Snapshot", // reuse the snapshot taken from the clusterToSnapshot cluster By("fetching the volume snapshots", func() { - snapshotList, err := getSnapshots(backup.Name, clusterToSnapshotName, namespace) + snapshotList, err := getSnapshots(backupTaken.Name, clusterToSnapshotName, namespace) Expect(err).ToNot(HaveOccurred()) - Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements))) + Expect(snapshotList.Items).To(HaveLen(len(backupTaken.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backupTaken, envVars) Expect(err).ToNot(HaveOccurred()) }) By("scale up the cluster", func() { - err := env.ScaleClusterSize(namespace, clusterToSnapshotName, 3) + err := clusterutils.ScaleSize(env.Ctx, env.Client, namespace, clusterToSnapshotName, 3) Expect(err).ToNot(HaveOccurred()) }) - By("checking the the cluster is working", func() { + By("checking the cluster is working", func() { // 
Setting up a cluster with three pods is slow, usually 200-600s - AssertClusterIsReady(namespace, clusterToSnapshotName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterToSnapshotName, testTimeouts[timeouts.ClusterIsReady], env) + }) + + By("checking the new replicas have been created using the snapshot", func() { + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) + Expect(err).ToNot(HaveOccurred()) + for _, pvc := range pvcList.Items { + if pvc.Labels[utils.ClusterInstanceRoleLabelName] == specs.ClusterRoleLabelReplica && + pvc.Labels[utils.ClusterLabelName] == clusterToSnapshotName { + Expect(pvc.Spec.DataSource.Kind).To(Equal(apiv1.VolumeSnapshotKind)) + Expect(pvc.Spec.DataSourceRef.Kind).To(Equal(apiv1.VolumeSnapshotKind)) + } + } }) // we need to verify the streaming replica continue works By("verifying the correct data exists in the new pod of the scaled cluster", func() { - podList, err := env.GetClusterReplicas(namespace, clusterToSnapshotName) + podList, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, + clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Expect(podList.Items).To(HaveLen(2)) - AssertDataExpectedCount(namespace, clusterToSnapshotName, tableName, 6, &podList.Items[0]) - AssertDataExpectedCount(namespace, clusterToSnapshotName, tableName, 6, &podList.Items[1]) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToSnapshotName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 6) + }) + }) + + It("should clean up unused backup connections", func() { + By("setting a non-existing snapshotClass", func() { + updateClusterSnapshotClass(namespace, clusterToSnapshotName, "wrongSnapshotClass") + }) + + By("starting a new backup that will fail", func() { + backupName := fmt.Sprintf("%s-failed", clusterToSnapshotName) + failedBackup, err := backups.Create( + env.Ctx, env.Client, + apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: backupName, + }, + Spec: apiv1.BackupSpec{ + Target: apiv1.BackupTargetPrimary, + Method: apiv1.BackupMethodVolumeSnapshot, + Cluster: apiv1.LocalObjectReference{Name: clusterToSnapshotName}, + }, + }, + ) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func(g Gomega) { + err = env.Client.Get(env.Ctx, types.NamespacedName{ + Namespace: namespace, + Name: backupName, + }, failedBackup) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(failedBackup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseFailed)) + g.Expect(failedBackup.Status.Error).To(ContainSubstring("Failed to get snapshot class")) + }, RetryTimeout).Should(Succeed()) + }) + + By("verifying that the backup connection is cleaned up", func() { + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterToSnapshotName) + Expect(err).ToNot(HaveOccurred()) + query := "SELECT count(*) FROM pg_stat_activity WHERE query ILIKE '%pg_backup_start%' " + + "AND application_name = 'cnpg-instance-manager'" + + Eventually(func() (int, error, error) { + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + postgres.PostgresDBName, + query) + value, atoiErr := strconv.Atoi(strings.TrimSpace(stdout)) + return value, err, atoiErr + }, RetryTimeout).Should(BeEquivalentTo(0), + "Stale backup connection should have been dropped") + }) + + By("resetting the 
snapshotClass value", func() { + updateClusterSnapshotClass(namespace, clusterToSnapshotName, os.Getenv("E2E_CSI_STORAGE_CLASS")) }) }) }) diff --git a/tests/e2e/wal_restore_parallel_test.go b/tests/e2e/wal_restore_parallel_test.go index 64c371fc82..b1dfde9045 100644 --- a/tests/e2e/wal_restore_parallel_test.go +++ b/tests/e2e/wal_restore_parallel_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -23,6 +26,11 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -62,14 +70,22 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun ) const namespacePrefix = "pg-backup-minio-wal-max-parallel" - clusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, + namespace, + "backup-storage-creds", + "minio", + "minio123", + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -81,12 +97,12 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) // Get the primary - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primary = pod.GetName() // Get the standby - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, po := range podList.Items { if po.Name != primary { @@ -100,14 +116,14 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Make sure both Wal-archive and Minio work // Create a WAL on the primary and check if it arrives at minio, within a short time By("archiving WALs and verifying they exist", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) 
Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL = switchWalAndGetLatestArchive(namespace, primary) - latestWALPath := minioPath(clusterName, latestWAL+".gz") + latestWALPath := minio.GetFilePath(clusterName, latestWAL+".gz") Eventually(func() (int, error) { // WALs are compressed with gzip in the fixture - return testUtils.CountFilesOnMinio(minioEnv, latestWALPath) + return minio.CountFiles(minioEnv, latestWALPath) }, RetryTimeout).Should(BeEquivalentTo(1), fmt.Sprintf("verify the existence of WAL %v in minio", latestWALPath)) }) @@ -118,23 +134,29 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun walFile3 = "0000000100000000000000F3" walFile4 = "0000000100000000000000F4" walFile5 = "0000000100000000000000F5" - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile1)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile1)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile2)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile2)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile3)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile3)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile4)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile4)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile5)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile5)). ShouldNot(HaveOccurred()) }) By("asserting the spool directory is empty on the standby", func() { if !testUtils.TestDirectoryEmpty(namespace, standby, SpoolDirectory) { purgeSpoolDirectoryCmd := "rm " + SpoolDirectory + "/*" - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -148,8 +170,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #1 is in the output location, #2 and #3 are in the spool directory. // The flag is unset. By("invoking the wal-restore command requesting #1 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -167,7 +190,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#3 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). 
Should(BeFalse(), "end-of-wal-stream flag is unset") @@ -178,8 +204,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #2 is in the output location, #3 is in the spool directory. // The flag is unset. By("invoking the wal-restore command requesting #2 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -193,7 +220,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#3 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeFalse(), "end-of-wal-stream flag is unset") @@ -204,8 +234,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #3 is in the output location, spool directory is empty. // The flag is unset. By("invoking the wal-restore command requesting #3 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -226,8 +257,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #4 is in the output location, #5 is in the spool directory. // The flag is set because #6 file not present. By("invoking the wal-restore command requesting #4 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -241,7 +273,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#5 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is set for #6 wal is not present") @@ -250,7 +285,8 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Generate a new wal file; the archive also contains WAL #6. By("forging a new wal file, the #6 wal", func() { walFile6 = "0000000100000000000000F6" - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile6)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile6)). ShouldNot(HaveOccurred()) }) @@ -258,8 +294,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Expected outcome: // exit code 0, #5 is in the output location, no files in the spool directory. The flag is still present. 
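The scenario comments threaded through this walkthrough encode the spool contract that wal-restore is expected to honour. A compact, runnable model of that bookkeeping follows; it is a reconstruction from the comments, not the real implementation, and it assumes maxParallel is 3 (matching the observed prefetch of two extra WALs):

    package main

    import "fmt"

    // spoolModel mimics the behaviour asserted by this test: a request fetches
    // up to maxParallel WALs, serves the first, spools the rest, and maintains
    // the end-of-wal-stream flag whenever the archive runs short.
    type spoolModel struct {
        spooled     map[int]bool
        endOfStream bool
    }

    func (s *spoolModel) request(n int, archive map[int]bool) bool {
        if s.spooled[n] { // spool hit: served without touching the archive
            delete(s.spooled, n)
            return true
        }
        if s.endOfStream { // flag set: fail fast (exit code 1) and clear it
            s.endOfStream = false
            return false
        }
        if !archive[n] { // requested WAL missing from the archive
            return false
        }
        fetched := 1
        for i := n + 1; i < n+3; i++ { // prefetch up to two more WALs
            if archive[i] {
                fetched++
                s.spooled[i] = true
            }
        }
        s.endOfStream = fetched < 3 // a short fetch raises the flag
        return true
    }

    func main() {
        // WALs #1..#5 exist, as in the fixture above; #6 is forged later.
        archive := map[int]bool{1: true, 2: true, 3: true, 4: true, 5: true}
        s := &spoolModel{spooled: map[int]bool{}}
        for _, n := range []int{1, 2, 3, 4, 5} {
            fmt.Printf("request #%d served=%v flag=%v\n", n, s.request(n, archive), s.endOfStream)
        }
    }

Running it reproduces the assertions in this walkthrough: #2 and #3 are served from the spool, and the short fetch on #4 raises the flag that the later #6 request then trips over.
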
By("invoking the wal-restore command requesting #5 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -273,7 +310,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeFalse(), "no wal files exist in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is still there") @@ -283,8 +323,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Expected outcome: // exit code 1, output location untouched, no files in the spool directory. The flag is unset. By("invoking the wal-restore command requesting #6 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -306,8 +347,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #6 is in the output location, no files in the spool directory. // The flag is present again because #7 and #8 are unavailable. By("invoking the wal-restore command requesting #6 wal again", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -321,7 +363,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeFalse(), "no wals in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is set for #7 and #8 wal is not present") diff --git a/tests/e2e/webhook_test.go b/tests/e2e/webhook_test.go index 6294f27a53..2960c05006 100644 --- a/tests/e2e/webhook_test.go +++ b/tests/e2e/webhook_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e @@ -22,7 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -47,7 +51,6 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper ) var webhookNamespace, clusterName string - var clusterIsDefaulted bool var err error BeforeEach(func() { @@ -57,36 +60,34 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper }) BeforeAll(func() { - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) }) It("checks if webhook works as expected", func() { webhookNamespacePrefix := "webhook-test" - clusterIsDefaulted = true By("having a deployment for the operator in state ready", func() { // Make sure that we have at least one operator already working - err := env.ScaleOperatorDeployment(1) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) - ready, err := env.IsOperatorDeploymentReady() + ready, err := operator.IsReady(env.Ctx, env.Client, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ready).To(BeTrue()) }) // Create a basic PG cluster - webhookNamespace, err := env.CreateUniqueTestNamespace(webhookNamespacePrefix) + webhookNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, webhookNamespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env) // Check if cluster is ready and the default values are populated - AssertClusterDefault(webhookNamespace, clusterName, clusterIsDefaulted, env) + AssertClusterDefault(webhookNamespace, clusterName, env) }) It("Does not crash the operator when disabled", func() { webhookNamespacePrefix := "no-webhook-test" - clusterIsDefaulted = true - mWebhook, admissionNumber, err := utils.GetCNPGsMutatingWebhookByName(env, mutatingWebhook) + mWebhook, admissionNumber, err := operator.GetMutatingWebhookByName(env.Ctx, env.Client, mutatingWebhook) Expect(err).ToNot(HaveOccurred()) // Add a namespace selector to MutatingWebhooks and ValidatingWebhook, this will assign the webhooks @@ -96,11 +97,13 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper newWebhook.Webhooks[admissionNumber].NamespaceSelector = &metav1.LabelSelector{ MatchLabels: map[string]string{"test": "value"}, } - err := utils.UpdateCNPGsMutatingWebhookConf(env, newWebhook) + err := operator.UpdateMutatingWebhookConf(env.Ctx, env.Interface, newWebhook) Expect(err).ToNot(HaveOccurred()) }) - vWebhook, admissionNumber, err := utils.GetCNPGsValidatingWebhookByName(env, validatingWebhook) + vWebhook, admissionNumber, err := operator.GetValidatingWebhookByName( + env.Ctx, env.Client, validatingWebhook, + ) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("Disabling the validating webhook %v namespace", operatorNamespace), func() { @@ -108,20 +111,20 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper newWebhook.Webhooks[admissionNumber].NamespaceSelector = &metav1.LabelSelector{ MatchLabels: map[string]string{"test": "value"}, } - err := utils.UpdateCNPGsValidatingWebhookConf(env, newWebhook) + err := operator.UpdateValidatingWebhookConf(env.Ctx, env.Interface, newWebhook) Expect(err).ToNot(HaveOccurred()) }) // Create a basic PG cluster - webhookNamespace, err = env.CreateUniqueTestNamespace(webhookNamespacePrefix) + webhookNamespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, webhookNamespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(webhookNamespace, clusterName, 
sampleFile, env) // Check if cluster is ready and has no default value in the object - AssertClusterDefault(webhookNamespace, clusterName, clusterIsDefaulted, env) + AssertClusterDefault(webhookNamespace, clusterName, env) // Make sure the operator is intact and not crashing By("having a deployment for the operator in state ready", func() { - ready, err := env.IsOperatorDeploymentReady() + ready, err := operator.IsReady(env.Ctx, env.Client, false) Expect(err).ShouldNot(HaveOccurred()) Expect(ready).To(BeTrue()) }) diff --git a/tests/labels.go b/tests/labels.go index 81b1329281..15d5461f6e 100644 --- a/tests/labels.go +++ b/tests/labels.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package tests contains the test infrastructure of the CloudNativePG operator @@ -23,25 +26,28 @@ const ( // LabelBackupRestore is a label for only selecting backup and restore tests LabelBackupRestore = "backup-restore" - // LabelBasic is a label for selecting basic test + // LabelBasic is a label for selecting basic tests LabelBasic = "basic" - // LabelClusterMetadata is a label for selecting cluster-metadata test + // LabelClusterMetadata is a label for selecting cluster-metadata tests LabelClusterMetadata = "cluster-metadata" + // LabelDeclarativeDatabases is a label for selecting the declarative databases test + LabelDeclarativeDatabases = "declarative-databases" + // LabelDisruptive is the string for labelling disruptive tests LabelDisruptive = "disruptive" - // LabelImportingDatabases is a label for selecting importing-databases test + // LabelImportingDatabases is a label for selecting the importing-databases test LabelImportingDatabases = "importing-databases" - // LabelMaintenance is a label for selecting maintenance test + // LabelMaintenance is a label for selecting maintenance tests LabelMaintenance = "maintenance" - // LabelNoOpenshift is the string for labelling tests that don't run on Openshift + // LabelNoOpenshift is the string for selecting tests that don't run on Openshift LabelNoOpenshift = "no-openshift" - // LabelObservability is a label for selecting observability test + // LabelObservability is a label for selecting observability tests LabelObservability = "observability" // LabelOperator is a label for only selecting operator tests @@ -50,7 +56,7 @@ const ( // LabelPerformance is the string for labelling performance tests LabelPerformance = "performance" - // LabelPlugin is a label for selecting plugin test + // LabelPlugin is a label for selecting plugin tests LabelPlugin = "plugin" // LabelPodScheduling is a label for selecting pod-scheduling test @@ -59,33 +65,39 @@ const ( // LabelPostgresConfiguration is a label for selecting postgres-configuration test LabelPostgresConfiguration = "postgres-configuration" - // LabelRecovery is a label for selecting recovery test + // LabelPublicationSubscription is a label for selecting the publication / subscription test + LabelPublicationSubscription = "publication-subscription" + + // LabelRecovery is a 
label for selecting recovery tests LabelRecovery = "recovery" - // LabelReplication is a label for selecting replication test + // LabelReplication is a label for selecting replication tests LabelReplication = "replication" - // LabelSecurity is a label for selecting security test + // LabelSecurity is a label for selecting security tests LabelSecurity = "security" - // LabelSelfHealing is a label for selecting self-healing test + // LabelSelfHealing is a label for selecting self-healing tests LabelSelfHealing = "self-healing" - // LabelServiceConnectivity is a label for selecting service connections test + // LabelServiceConnectivity is a label for selecting service connections tests LabelServiceConnectivity = "service-connectivity" - // LabelSmoke is a label for selecting smoke test + // LabelSmoke is a label for selecting smoke tests LabelSmoke = "smoke" // LabelSnapshot is a label for selecting snapshot tests LabelSnapshot = "snapshot" - // LabelStorage is a label for selecting storage test + // LabelStorage is a label for selecting storage tests LabelStorage = "storage" - // LabelTablespaces is a lable for selectin the tablespaces tests + // LabelTablespaces is a label for selecting the tablespaces test LabelTablespaces = "tablespaces" - // LabelUpgrade is the string for labelling upgrade tests + // LabelUpgrade is a label for upgrade tests LabelUpgrade = "upgrade" + + // LabelPostgresMajorUpgrade is a label for Cluster major version upgrade tests + LabelPostgresMajorUpgrade = "postgres-major-upgrade" ) diff --git a/tests/levels.go b/tests/levels.go index 2f6475755e..724ff114d3 100644 --- a/tests/levels.go +++ b/tests/levels.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package tests @@ -20,7 +23,7 @@ import ( "os" "strconv" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" ) // Level - Define test importance. Each test should define its own importance @@ -46,13 +49,13 @@ const defaultTestDepth = int(Medium) // TestEnvLevel struct for operator testing type TestEnvLevel struct { - *utils.TestingEnvironment + *environment.TestingEnvironment Depth int } // TestLevel creates the environment for testing func TestLevel() (*TestEnvLevel, error) { - env, err := utils.NewTestingEnvironment() + env, err := environment.NewTestingEnvironment() if err != nil { return nil, err } diff --git a/tests/utils/azurite.go b/tests/utils/azurite.go deleted file mode 100644 index c7732104b1..0000000000 --- a/tests/utils/azurite.go +++ /dev/null @@ -1,348 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "os" - - apiv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/ptr" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" -) - -const ( - azuriteImage = "mcr.microsoft.com/azure-storage/azurite" - azuriteClientImage = "mcr.microsoft.com/azure-cli" -) - -// AzureConfiguration contains the variables needed to run the azure test environment correctly -type AzureConfiguration struct { - StorageAccount string - StorageKey string - BlobContainer string -} - -func newAzureConfigurationFromEnv() AzureConfiguration { - return AzureConfiguration{ - StorageAccount: os.Getenv("AZURE_STORAGE_ACCOUNT"), - StorageKey: os.Getenv("AZURE_STORAGE_KEY"), - BlobContainer: os.Getenv("AZURE_BLOB_CONTAINER"), - } -} - -// CreateCertificateSecretsOnAzurite will create secrets for Azurite deployment -func CreateCertificateSecretsOnAzurite( - namespace, - clusterName, - azuriteCaSecName, - azuriteTLSSecName string, - env *TestingEnvironment, -) error { - // create CA certificates - _, caPair, err := CreateSecretCA(namespace, clusterName, azuriteCaSecName, true, env) - if err != nil { - return err - } - // sign and create secret using CA certificate and key - serverPair, err := caPair.CreateAndSignPair("azurite", certs.CertTypeServer, - []string{"azurite.internal.mydomain.net, azurite.default.svc, azurite.default,"}, - ) - if err != nil { - return err - } - serverSecret := serverPair.GenerateCertificateSecret(namespace, azuriteTLSSecName) - err = env.Client.Create(env.Ctx, serverSecret) - if err != nil { - return err - } - return nil -} - -// CreateStorageCredentialsOnAzurite will create credentials for Azurite -func CreateStorageCredentialsOnAzurite(namespace string, env *TestingEnvironment) error { - azuriteSecrets := getStorageCredentials(namespace) - return env.Client.Create(env.Ctx, &azuriteSecrets) -} - -// InstallAzurite will set up Azurite in defined namespace and creates service -func InstallAzurite(namespace string, env *TestingEnvironment) error { - azuriteDeployment := getAzuriteDeployment(namespace) - err := env.Client.Create(env.Ctx, &azuriteDeployment) - if err != nil { - return err - } - // Wait for the Azurite pod to be ready - deploymentNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: "azurite", - } - deployment := &apiv1.Deployment{} - err = env.Client.Get(env.Ctx, deploymentNamespacedName, deployment) - if err != nil { - return err - } - err = DeploymentWaitForReady(env, deployment, 300) - if err != nil { - return err - } - azuriteService := getAzuriteService(namespace) - err = env.Client.Create(env.Ctx, &azuriteService) - return err -} - -// InstallAzCli will install Az cli -func InstallAzCli(namespace string, env *TestingEnvironment) error { - azCLiPod := getAzuriteClientPod(namespace) - err := PodCreateAndWaitForReady(env, &azCLiPod, 180) - if err != nil { - return err - } - return nil -} - -// getAzuriteClientPod get the cli client pod -func getAzuriteClientPod(namespace string) corev1.Pod { - 
seccompProfile := &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - } - - cliClientPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "az-cli", - Labels: map[string]string{"run": "az-cli"}, - Namespace: namespace, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "az-cli", - Image: azuriteClientImage, - Args: []string{"/bin/bash", "-c", "sleep 500000"}, - Env: []corev1.EnvVar{ - { - Name: "AZURE_CONNECTION_STRING", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "azurite", - }, - Key: "AZURE_CONNECTION_STRING", - }, - }, - }, - { - Name: "REQUESTS_CA_BUNDLE", - Value: "/etc/ssl/certs/rootCA.pem", - }, - { - Name: "HOME", - Value: "/azurite", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "cert", - MountPath: "/etc/ssl/certs", - }, - { - Name: "azurite", - MountPath: "/azurite", - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - SeccompProfile: seccompProfile, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "cert", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "azurite-ca-secret", - Items: []corev1.KeyToPath{ - { - Key: "ca.crt", - Path: "rootCA.pem", - }, - }, - }, - }, - }, - { - Name: "azurite", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - SeccompProfile: seccompProfile, - }, - }, - } - return cliClientPod -} - -// getAzuriteService get the service for Azurite -func getAzuriteService(namespace string) corev1.Service { - azuriteService := corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "azurite", - Labels: map[string]string{"app": "azurite"}, - Namespace: namespace, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: 10000, - Protocol: "TCP", - TargetPort: intstr.IntOrString{ - IntVal: 10000, - }, - }, - }, - Selector: map[string]string{"app": "azurite"}, - }, - } - return azuriteService -} - -// getAzuriteDeployment get the deployment for Azurite -func getAzuriteDeployment(namespace string) apiv1.Deployment { - replicas := int32(1) - seccompProfile := &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - } - - azuriteDeployment := apiv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "azurite", - Namespace: namespace, - Labels: map[string]string{"app": "azurite"}, - }, - Spec: apiv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "azurite"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "azurite"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Image: azuriteImage, - Name: "azurite", - Command: []string{"azurite"}, - Args: []string{ - "-l", "/data", "--cert", "/etc/ssl/certs/azurite.pem", - "--key", "/etc/ssl/certs/azurite-key.pem", - "--oauth", "basic", "--blobHost", "0.0.0.0", - }, - Env: []corev1.EnvVar{ - { - Name: "AZURITE_ACCOUNTS", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "azurite", - }, - Key: "AZURITE_ACCOUNTS", - }, - }, - }, - }, - Ports: []corev1.ContainerPort{ - { - ContainerPort: 10000, - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - MountPath: "/data", - Name: "data-volume", - }, - { - MountPath: 
"/etc/ssl/certs", - Name: "cert", - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - SeccompProfile: seccompProfile, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "data-volume", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - { - Name: "cert", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "azurite-tls-secret", - Items: []corev1.KeyToPath{ - { - Key: "tls.crt", - Path: "azurite.pem", - }, - { - Key: "tls.key", - Path: "azurite-key.pem", - }, - }, - }, - }, - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - SeccompProfile: seccompProfile, - }, - }, - }, - }, - } - return azuriteDeployment -} - -// getStorageCredentials get storageCredentials for Azurite -func getStorageCredentials(namespace string) corev1.Secret { - azuriteStorageSecrets := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "azurite", - }, - StringData: map[string]string{ - "AZURITE_ACCOUNTS": "storageaccountname:c3RvcmFnZWFjY291bnRrZXk=", - "AZURE_CONNECTION_STRING": "DefaultEndpointsProtocol=https;AccountName=storageaccountname;" + - "AccountKey=c3RvcmFnZWFjY291bnRrZXk=;BlobEndpoint=https://azurite:10000/storageaccountname;", - }, - } - return azuriteStorageSecrets -} diff --git a/tests/utils/backup.go b/tests/utils/backup.go deleted file mode 100644 index d88c0b5504..0000000000 --- a/tests/utils/backup.go +++ /dev/null @@ -1,595 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "encoding/json" - "fmt" - "os" - - volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - - . 
"github.com/onsi/gomega" // nolint -) - -// ExecuteBackup performs a backup and checks the backup status -func ExecuteBackup( - namespace, - backupFile string, - onlyTargetStandbys bool, - timeoutSeconds int, - env *TestingEnvironment, -) *apiv1.Backup { - backupName, err := env.GetResourceNameFromYAML(backupFile) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() error { - _, stderr, err := RunUnchecked("kubectl apply -n " + namespace + " -f " + backupFile) - if err != nil { - return fmt.Errorf("could not create backup.\nStdErr: %v\nError: %v", stderr, err) - } - return nil - }, RetryTimeout, PollingTime).Should(BeNil()) - backupNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: backupName, - } - backup := &apiv1.Backup{} - // Verifying backup status - Eventually(func() (apiv1.BackupPhase, error) { - err = env.Client.Get(env.Ctx, backupNamespacedName, backup) - return backup.Status.Phase, err - }, timeoutSeconds).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted)) - Eventually(func() (string, error) { - err = env.Client.Get(env.Ctx, backupNamespacedName, backup) - if err != nil { - return "", err - } - backupStatus := backup.GetStatus() - return backupStatus.BeginLSN, err - }, timeoutSeconds).ShouldNot(BeEmpty()) - - var cluster *apiv1.Cluster - Eventually(func() error { - var err error - cluster, err = env.GetCluster(namespace, backup.Spec.Cluster.Name) - return err - }, timeoutSeconds).ShouldNot(HaveOccurred()) - - backupStatus := backup.GetStatus() - if cluster.Spec.Backup != nil { - backupTarget := cluster.Spec.Backup.Target - if backup.Spec.Target != "" { - backupTarget = backup.Spec.Target - } - switch backupTarget { - case apiv1.BackupTargetPrimary, "": - Expect(backupStatus.InstanceID.PodName).To(BeEquivalentTo(cluster.Status.TargetPrimary)) - case apiv1.BackupTargetStandby: - Expect(backupStatus.InstanceID.PodName).To(BeElementOf(cluster.Status.InstanceNames)) - if onlyTargetStandbys { - Expect(backupStatus.InstanceID.PodName).NotTo(Equal(cluster.Status.TargetPrimary)) - } - } - } - - Expect(backupStatus.BeginWal).NotTo(BeEmpty()) - Expect(backupStatus.EndLSN).NotTo(BeEmpty()) - Expect(backupStatus.EndWal).NotTo(BeEmpty()) - return backup -} - -// CreateClusterFromBackupUsingPITR creates a cluster from backup, using the PITR -func CreateClusterFromBackupUsingPITR( - namespace, - clusterName, - backupFilePath, - targetTime string, - env *TestingEnvironment, -) (*apiv1.Cluster, error) { - backupName, err := env.GetResourceNameFromYAML(backupFilePath) - if err != nil { - return nil, err - } - storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: namespace, - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - StorageClass: &storageClassName, - }, - - PostgresConfiguration: apiv1.PostgresConfiguration{ - Parameters: map[string]string{ - "log_checkpoints": "on", - "log_lock_waits": "on", - "log_min_duration_statement": "1000", - "log_statement": "ddl", - "log_temp_files": "1024", - "log_autovacuum_min_duration": "1s", - "log_replication_commands": "on", - }, - }, - - Bootstrap: &apiv1.BootstrapConfiguration{ - Recovery: &apiv1.BootstrapRecovery{ - Backup: &apiv1.BackupSource{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: backupName, - }, - }, - RecoveryTarget: &apiv1.RecoveryTarget{ - TargetTime: targetTime, - }, - }, - }, - }, - } - obj, err := CreateObject(env, 
restoreCluster) - if err != nil { - return nil, err - } - cluster, ok := obj.(*apiv1.Cluster) - if !ok { - return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) - } - return cluster, nil -} - -// CreateClusterFromExternalClusterBackupWithPITROnAzure creates a cluster on Azure, starting from an external cluster -// backup with PITR -func CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - sourceClusterName, - targetTime, - storageCredentialsSecretName, - azStorageAccount, - azBlobContainer string, - env *TestingEnvironment, -) (*apiv1.Cluster, error) { - storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - destinationPath := fmt.Sprintf("https://%v.blob.core.windows.net/%v/", - azStorageAccount, azBlobContainer) - - restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: externalClusterName, - Namespace: namespace, - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - StorageClass: &storageClassName, - }, - - PostgresConfiguration: apiv1.PostgresConfiguration{ - Parameters: map[string]string{ - "log_checkpoints": "on", - "log_lock_waits": "on", - "log_min_duration_statement": "1000", - "log_statement": "ddl", - "log_temp_files": "1024", - "log_autovacuum_min_duration": "1s", - "log_replication_commands": "on", - }, - }, - - Bootstrap: &apiv1.BootstrapConfiguration{ - Recovery: &apiv1.BootstrapRecovery{ - Source: sourceClusterName, - RecoveryTarget: &apiv1.RecoveryTarget{ - TargetTime: targetTime, - }, - }, - }, - - ExternalClusters: []apiv1.ExternalCluster{ - { - Name: sourceClusterName, - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - DestinationPath: destinationPath, - BarmanCredentials: apiv1.BarmanCredentials{ - Azure: &apiv1.AzureCredentials{ - StorageAccount: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: storageCredentialsSecretName, - }, - Key: "ID", - }, - StorageKey: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: storageCredentialsSecretName, - }, - Key: "KEY", - }, - }, - }, - }, - }, - }, - }, - } - obj, err := CreateObject(env, restoreCluster) - if err != nil { - return nil, err - } - cluster, ok := obj.(*apiv1.Cluster) - if !ok { - return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) - } - return cluster, nil -} - -// CreateClusterFromExternalClusterBackupWithPITROnMinio creates a cluster on Minio, starting from an external cluster -// backup with PITR -func CreateClusterFromExternalClusterBackupWithPITROnMinio( - namespace, - externalClusterName, - sourceClusterName, - targetTime string, - env *TestingEnvironment, -) (*apiv1.Cluster, error) { - storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - - restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: externalClusterName, - Namespace: namespace, - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - StorageClass: &storageClassName, - }, - - PostgresConfiguration: apiv1.PostgresConfiguration{ - Parameters: map[string]string{ - "log_checkpoints": "on", - "log_lock_waits": "on", - "log_min_duration_statement": "1000", - "log_statement": "ddl", - "log_temp_files": "1024", - "log_autovacuum_min_duration": "1s", - "log_replication_commands": "on", - }, - }, - - Bootstrap: &apiv1.BootstrapConfiguration{ - Recovery: &apiv1.BootstrapRecovery{ - Source: 
sourceClusterName, - RecoveryTarget: &apiv1.RecoveryTarget{ - TargetTime: targetTime, - }, - }, - }, - - ExternalClusters: []apiv1.ExternalCluster{ - { - Name: sourceClusterName, - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - DestinationPath: "s3://cluster-backups/", - EndpointURL: "https://minio-service.minio:9000", - EndpointCA: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "minio-server-ca-secret", - }, - Key: "ca.crt", - }, - BarmanCredentials: apiv1.BarmanCredentials{ - AWS: &apiv1.S3Credentials{ - AccessKeyIDReference: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "backup-storage-creds", - }, - Key: "ID", - }, - SecretAccessKeyReference: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "backup-storage-creds", - }, - Key: "KEY", - }, - }, - }, - }, - }, - }, - }, - } - obj, err := CreateObject(env, restoreCluster) - if err != nil { - return nil, err - } - cluster, ok := obj.(*apiv1.Cluster) - if !ok { - return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) - } - return cluster, nil -} - -// CreateClusterFromExternalClusterBackupWithPITROnAzurite creates a cluster with Azurite, starting from an external -// cluster backup with PITR -func CreateClusterFromExternalClusterBackupWithPITROnAzurite( - namespace, - externalClusterName, - sourceClusterName, - targetTime string, - env *TestingEnvironment, -) (*apiv1.Cluster, error) { - storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - DestinationPath := fmt.Sprintf("https://azurite:10000/storageaccountname/%v", sourceClusterName) - - restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: externalClusterName, - Namespace: namespace, - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - StorageClass: &storageClassName, - }, - - PostgresConfiguration: apiv1.PostgresConfiguration{ - Parameters: map[string]string{ - "log_checkpoints": "on", - "log_lock_waits": "on", - "log_min_duration_statement": "1000", - "log_statement": "ddl", - "log_temp_files": "1024", - "log_autovacuum_min_duration": "1s", - "log_replication_commands": "on", - }, - }, - - Bootstrap: &apiv1.BootstrapConfiguration{ - Recovery: &apiv1.BootstrapRecovery{ - Source: sourceClusterName, - RecoveryTarget: &apiv1.RecoveryTarget{ - TargetTime: targetTime, - }, - }, - }, - - ExternalClusters: []apiv1.ExternalCluster{ - { - Name: sourceClusterName, - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - DestinationPath: DestinationPath, - EndpointCA: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "azurite-ca-secret", - }, - Key: "ca.crt", - }, - BarmanCredentials: apiv1.BarmanCredentials{ - Azure: &apiv1.AzureCredentials{ - ConnectionString: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "azurite", - }, - Key: "AZURE_CONNECTION_STRING", - }, - }, - }, - }, - }, - }, - }, - } - obj, err := CreateObject(env, restoreCluster) - if err != nil { - return nil, err - } - cluster, ok := obj.(*apiv1.Cluster) - if !ok { - return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) - } - return cluster, nil -} - -// ComposeAzBlobListAzuriteCmd builds the Azure storage blob list command for Azurite -func ComposeAzBlobListAzuriteCmd(clusterName, path string) string { - return fmt.Sprintf("az storage blob list --container-name %v --query 
\"[?contains(@.name, \\`%v\\`)].name\" "+ - "--connection-string $AZURE_CONNECTION_STRING", - clusterName, path) -} - -// ComposeAzBlobListCmd builds the Azure storage blob list command -func ComposeAzBlobListCmd( - configuration AzureConfiguration, - clusterName, - path string, -) string { - return fmt.Sprintf("az storage blob list --account-name %v "+ - "--account-key %v "+ - "--container-name %v "+ - "--prefix %v/ "+ - "--query \"[?contains(@.name, \\`%v\\`)].name\"", - configuration.StorageAccount, configuration.StorageKey, configuration.BlobContainer, clusterName, path) -} - -// CountFilesOnAzureBlobStorage counts files on Azure Blob storage -func CountFilesOnAzureBlobStorage( - configuration AzureConfiguration, - clusterName, - path string, -) (int, error) { - azBlobListCmd := ComposeAzBlobListCmd(configuration, clusterName, path) - out, _, err := RunUnchecked(azBlobListCmd) - if err != nil { - return -1, err - } - var arr []string - err = json.Unmarshal([]byte(out), &arr) - return len(arr), err -} - -// CountFilesOnAzuriteBlobStorage counts files on Azure Blob storage. using Azurite -func CountFilesOnAzuriteBlobStorage( - namespace, - clusterName, - path string, -) (int, error) { - azBlobListCmd := ComposeAzBlobListAzuriteCmd(clusterName, path) - out, _, err := RunUnchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+ - "-- /bin/bash -c '%v'", namespace, azBlobListCmd)) - if err != nil { - return -1, err - } - var arr []string - err = json.Unmarshal([]byte(out), &arr) - return len(arr), err -} - -// GetConditionsInClusterStatus get conditions values as given type from cluster object status -func GetConditionsInClusterStatus( - namespace, - clusterName string, - env *TestingEnvironment, - conditionType apiv1.ClusterConditionType, -) (*metav1.Condition, error) { - var cluster *apiv1.Cluster - var err error - - cluster, err = env.GetCluster(namespace, clusterName) - if err != nil { - return nil, err - } - - for _, cond := range cluster.Status.Conditions { - if cond.Type == string(conditionType) { - return &cond, nil - } - } - - return nil, fmt.Errorf("no condition matching requested type found: %v", conditionType) -} - -// CreateOnDemandBackupViaKubectlPlugin uses the kubectl plugin to create a backup -func CreateOnDemandBackupViaKubectlPlugin( - namespace, - clusterName, - backupName string, - target apiv1.BackupTarget, - method apiv1.BackupMethod, -) error { - command := fmt.Sprintf("kubectl cnpg backup %v -n %v", clusterName, namespace) - - if backupName != "" { - command = fmt.Sprintf("%v --backup-name %v", command, backupName) - } - if target != "" { - command = fmt.Sprintf("%v --backup-target %v", command, target) - } - if method != "" { - command = fmt.Sprintf("%v --method %v", command, method) - } - - _, _, err := Run(command) - return err -} - -// CreateOnDemandBackup creates a Backup resource for a given cluster name -// Deprecated: Use CreateBackup. 
-// TODO: eradicate -func CreateOnDemandBackup( - namespace, - clusterName, - backupName string, - target apiv1.BackupTarget, - method apiv1.BackupMethod, - env *TestingEnvironment, -) (*apiv1.Backup, error) { - targetBackup := &apiv1.Backup{ - ObjectMeta: metav1.ObjectMeta{ - Name: backupName, - Namespace: namespace, - }, - Spec: apiv1.BackupSpec{ - Cluster: apiv1.LocalObjectReference{ - Name: clusterName, - }, - }, - } - - if target != "" { - targetBackup.Spec.Target = target - } - if method != "" { - targetBackup.Spec.Method = method - } - - obj, err := CreateObject(env, targetBackup) - if err != nil { - return nil, err - } - backup, ok := obj.(*apiv1.Backup) - if !ok { - return nil, fmt.Errorf("created object is not of Backup type: %T %v", obj, obj) - } - return backup, nil -} - -// CreateBackup creates a Backup resource for a given cluster name -func CreateBackup( - targetBackup apiv1.Backup, - env *TestingEnvironment, -) (*apiv1.Backup, error) { - obj, err := CreateObject(env, &targetBackup) - if err != nil { - return nil, err - } - backup, ok := obj.(*apiv1.Backup) - if !ok { - return nil, fmt.Errorf("created object is not of Backup type: %T %v", obj, obj) - } - return backup, nil -} - -// GetVolumeSnapshot gets a VolumeSnapshot given name and namespace -func (env TestingEnvironment) GetVolumeSnapshot( - namespace, - name string, -) (*volumesnapshot.VolumeSnapshot, error) { - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: name, - } - volumeSnapshot := &volumesnapshot.VolumeSnapshot{} - err := GetObject(&env, namespacedName, volumeSnapshot) - if err != nil { - return nil, err - } - return volumeSnapshot, nil -} diff --git a/tests/utils/backups/azurite.go b/tests/utils/backups/azurite.go new file mode 100644 index 0000000000..5c010b9f3b --- /dev/null +++ b/tests/utils/backups/azurite.go @@ -0,0 +1,687 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package backups + +import ( + "context" + "encoding/json" + "fmt" + "os" + "strings" + "time" + + apiv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" +) + +const ( + azuriteImage = "mcr.microsoft.com/azure-storage/azurite" + azuriteClientImage = "mcr.microsoft.com/azure-cli" +) + +// AzureConfiguration contains the variables needed to run the azure test environment correctly +type AzureConfiguration struct { + StorageAccount string + StorageKey string + BlobContainer string +} + +// NewAzureConfigurationFromEnv creates a new AzureConfiguration from the environment variables +func NewAzureConfigurationFromEnv() AzureConfiguration { + return AzureConfiguration{ + StorageAccount: os.Getenv("AZURE_STORAGE_ACCOUNT"), + StorageKey: os.Getenv("AZURE_STORAGE_KEY"), + BlobContainer: os.Getenv("AZURE_BLOB_CONTAINER"), + } +} + +// CreateCertificateSecretsOnAzurite will create secrets for Azurite deployment +func CreateCertificateSecretsOnAzurite( + ctx context.Context, + crudClient client.Client, + namespace, + clusterName, + azuriteCaSecName, + azuriteTLSSecName string, +) error { + // create CA certificates + _, caPair, err := secrets.CreateSecretCA( + ctx, crudClient, + namespace, clusterName, azuriteCaSecName, + true, + ) + if err != nil { + return err + } + // sign and create secret using CA certificate and key + serverPair, err := caPair.CreateAndSignPair("azurite", certs.CertTypeServer, + []string{"azurite.internal.mydomain.net, azurite.default.svc, azurite.default,"}, + ) + if err != nil { + return err + } + serverSecret := serverPair.GenerateCertificateSecret(namespace, azuriteTLSSecName) + err = crudClient.Create(ctx, serverSecret) + if err != nil { + return err + } + return nil +} + +// CreateStorageCredentialsOnAzurite will create credentials for Azurite +func CreateStorageCredentialsOnAzurite( + ctx context.Context, + crudClient client.Client, + namespace string, +) error { + azuriteSecrets := getStorageCredentials(namespace) + return crudClient.Create(ctx, &azuriteSecrets) +} + +// InstallAzurite will set up Azurite in defined namespace and creates service +func InstallAzurite( + ctx context.Context, + crudClient client.Client, + namespace string, +) error { + azuriteDeployment := getAzuriteDeployment(namespace) + err := crudClient.Create(ctx, &azuriteDeployment) + if err != nil { + return err + } + // Wait for the Azurite pod to be ready + deploymentNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: "azurite", + } + deployment := &apiv1.Deployment{} + err = crudClient.Get(ctx, deploymentNamespacedName, deployment) + if err != nil { + return err + } + err = deployments.WaitForReady(ctx, crudClient, deployment, 300) + if err != nil { + return err + } + azuriteService := getAzuriteService(namespace) + err = crudClient.Create(ctx, &azuriteService) + return err +} + +// InstallAzCli will install Az cli +func 
InstallAzCli( + ctx context.Context, + crudClient client.Client, + namespace string, +) error { + azCLiPod := getAzuriteClientPod(namespace) + err := pods.CreateAndWaitForReady(ctx, crudClient, &azCLiPod, 180) + if err != nil { + return err + } + return nil +} + +// getAzuriteClientPod gets the az CLI client pod +func getAzuriteClientPod(namespace string) corev1.Pod { + seccompProfile := &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + } + + cliClientPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "az-cli", + Labels: map[string]string{"run": "az-cli"}, + Namespace: namespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "az-cli", + Image: azuriteClientImage, + Args: []string{"/bin/bash", "-c", "sleep 500000"}, + Env: []corev1.EnvVar{ + { + Name: "AZURE_CONNECTION_STRING", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "azurite", + }, + Key: "AZURE_CONNECTION_STRING", + }, + }, + }, + { + Name: "REQUESTS_CA_BUNDLE", + Value: "/etc/ssl/certs/rootCA.pem", + }, + { + Name: "HOME", + Value: "/azurite", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "cert", + MountPath: "/etc/ssl/certs", + }, + { + Name: "azurite", + MountPath: "/azurite", + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + SeccompProfile: seccompProfile, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "cert", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "azurite-ca-secret", + Items: []corev1.KeyToPath{ + { + Key: "ca.crt", + Path: "rootCA.pem", + }, + }, + }, + }, + }, + { + Name: "azurite", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: seccompProfile, + }, + }, + } + return cliClientPod +} + +// getAzuriteService gets the service for Azurite +func getAzuriteService(namespace string) corev1.Service { + azuriteService := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "azurite", + Labels: map[string]string{"app": "azurite"}, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Port: 10000, + Protocol: "TCP", + TargetPort: intstr.IntOrString{ + IntVal: 10000, + }, + }, + }, + Selector: map[string]string{"app": "azurite"}, + }, + } + return azuriteService +} + +// getAzuriteDeployment gets the deployment for Azurite +func getAzuriteDeployment(namespace string) apiv1.Deployment { + replicas := int32(1) + seccompProfile := &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + } + + azuriteDeployment := apiv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "azurite", + Namespace: namespace, + Labels: map[string]string{"app": "azurite"}, + }, + Spec: apiv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "azurite"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "azurite"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: azuriteImage, + Name: "azurite", + Command: []string{"azurite"}, + Args: []string{ + "--skipApiVersionCheck", + "-l", "/data", "--cert", "/etc/ssl/certs/azurite.pem", + "--key", "/etc/ssl/certs/azurite-key.pem", + "--oauth", "basic", "--blobHost", "0.0.0.0", + }, + Env: []corev1.EnvVar{ 
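+ // AZURITE_ACCOUNTS is read from the "azurite" Secret created by getStorageCredentials, keeping the account name and key defined in one place alongside the connection string.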
+ { + Name: "AZURITE_ACCOUNTS", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "azurite", + }, + Key: "AZURITE_ACCOUNTS", + }, + }, + }, + }, + Ports: []corev1.ContainerPort{ + { + ContainerPort: 10000, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/data", + Name: "data-volume", + }, + { + MountPath: "/etc/ssl/certs", + Name: "cert", + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + SeccompProfile: seccompProfile, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "data-volume", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "cert", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "azurite-tls-secret", + Items: []corev1.KeyToPath{ + { + Key: "tls.crt", + Path: "azurite.pem", + }, + { + Key: "tls.key", + Path: "azurite-key.pem", + }, + }, + }, + }, + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: seccompProfile, + }, + }, + }, + }, + } + return azuriteDeployment +} + +// getStorageCredentials get storageCredentials for Azurite +func getStorageCredentials(namespace string) corev1.Secret { + azuriteStorageSecrets := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "azurite", + }, + StringData: map[string]string{ + "AZURITE_ACCOUNTS": "storageaccountname:c3RvcmFnZWFjY291bnRrZXk=", + "AZURE_CONNECTION_STRING": "DefaultEndpointsProtocol=https;AccountName=storageaccountname;" + + "AccountKey=c3RvcmFnZWFjY291bnRrZXk=;BlobEndpoint=https://azurite:10000/storageaccountname;", + }, + } + return azuriteStorageSecrets +} + +// CreateClusterFromExternalClusterBackupWithPITROnAzure creates a cluster on Azure, starting from an external cluster +// backup with PITR +func CreateClusterFromExternalClusterBackupWithPITROnAzure( + ctx context.Context, + crudClient client.Client, + namespace, + externalClusterName, + sourceClusterName, + targetTime, + storageCredentialsSecretName, + azStorageAccount, + azBlobContainer string, +) (*v1.Cluster, error) { + storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + destinationPath := fmt.Sprintf("https://%v.blob.core.windows.net/%v/", + azStorageAccount, azBlobContainer) + + restoreCluster := &v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: externalClusterName, + Namespace: namespace, + }, + Spec: v1.ClusterSpec{ + Instances: 3, + + StorageConfiguration: v1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClassName, + }, + + PostgresConfiguration: v1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1s", + "log_replication_commands": "on", + }, + }, + + Bootstrap: &v1.BootstrapConfiguration{ + Recovery: &v1.BootstrapRecovery{ + Source: sourceClusterName, + RecoveryTarget: &v1.RecoveryTarget{ + TargetTime: targetTime, + }, + }, + }, + + ExternalClusters: []v1.ExternalCluster{ + { + Name: sourceClusterName, + BarmanObjectStore: &v1.BarmanObjectStoreConfiguration{ + DestinationPath: destinationPath, + BarmanCredentials: v1.BarmanCredentials{ + Azure: &v1.AzureCredentials{ + StorageAccount: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: storageCredentialsSecretName, + }, + Key: "ID", + }, + StorageKey: 
&v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: storageCredentialsSecretName, + }, + Key: "KEY", + }, + }, + }, + }, + }, + }, + }, + } + obj, err := objects.Create(ctx, crudClient, restoreCluster) + if err != nil { + return nil, err + } + cluster, ok := obj.(*v1.Cluster) + if !ok { + return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) + } + return cluster, nil +} + +// CreateClusterFromExternalClusterBackupWithPITROnAzurite creates a cluster with Azurite, starting from an external +// cluster backup with PITR +func CreateClusterFromExternalClusterBackupWithPITROnAzurite( + ctx context.Context, + crudClient client.Client, + namespace, + externalClusterName, + sourceClusterName, + targetTime string, +) (*v1.Cluster, error) { + storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + DestinationPath := fmt.Sprintf("https://azurite:10000/storageaccountname/%v", sourceClusterName) + + restoreCluster := &v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: externalClusterName, + Namespace: namespace, + }, + Spec: v1.ClusterSpec{ + Instances: 3, + + StorageConfiguration: v1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClassName, + }, + + PostgresConfiguration: v1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1s", + "log_replication_commands": "on", + }, + }, + + Bootstrap: &v1.BootstrapConfiguration{ + Recovery: &v1.BootstrapRecovery{ + Source: sourceClusterName, + RecoveryTarget: &v1.RecoveryTarget{ + TargetTime: targetTime, + }, + }, + }, + + ExternalClusters: []v1.ExternalCluster{ + { + Name: sourceClusterName, + BarmanObjectStore: &v1.BarmanObjectStoreConfiguration{ + DestinationPath: DestinationPath, + EndpointCA: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "azurite-ca-secret", + }, + Key: "ca.crt", + }, + BarmanCredentials: v1.BarmanCredentials{ + Azure: &v1.AzureCredentials{ + ConnectionString: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "azurite", + }, + Key: "AZURE_CONNECTION_STRING", + }, + }, + }, + }, + }, + }, + }, + } + obj, err := objects.Create(ctx, crudClient, restoreCluster) + if err != nil { + return nil, err + } + cluster, ok := obj.(*v1.Cluster) + if !ok { + return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) + } + return cluster, nil +} + +// ComposeAzBlobListAzuriteCmd builds the Azure storage blob list command for Azurite +func ComposeAzBlobListAzuriteCmd(clusterName, path string) string { + return fmt.Sprintf("az storage blob list --container-name %v --query \"[?contains(@.name, \\`%v\\`)].name\" "+ + "--connection-string $AZURE_CONNECTION_STRING", + clusterName, path) +} + +// ComposeAzBlobListCmd builds the Azure storage blob list command +func ComposeAzBlobListCmd( + configuration AzureConfiguration, + clusterName, + path string, +) string { + return fmt.Sprintf("az storage blob list --account-name %v "+ + "--account-key %v "+ + "--container-name %v "+ + "--prefix %v/ "+ + "--query \"[?contains(@.name, \\`%v\\`)].name\"", + configuration.StorageAccount, configuration.StorageKey, configuration.BlobContainer, clusterName, path) +} + +// CountFilesOnAzureBlobStorage counts files on Azure Blob storage +func CountFilesOnAzureBlobStorage( + configuration AzureConfiguration, + clusterName, + path string, +) 
(int, error) { + azBlobListCmd := ComposeAzBlobListCmd(configuration, clusterName, path) + out, _, err := run.Unchecked(azBlobListCmd) + if err != nil { + return -1, err + } + var arr []string + err = json.Unmarshal([]byte(out), &arr) + return len(arr), err +} + +// CountFilesOnAzuriteBlobStorage counts files on Azure Blob storage using Azurite +func CountFilesOnAzuriteBlobStorage( + namespace, + clusterName, + path string, +) (int, error) { + azBlobListCmd := ComposeAzBlobListAzuriteCmd(clusterName, path) + out, _, err := run.Unchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+ + "-- /bin/bash -c '%v'", namespace, azBlobListCmd)) + if err != nil { + return -1, err + } + var arr []string + err = json.Unmarshal([]byte(out), &arr) + return len(arr), err +} + +// verifySASTokenWriteActivity returns true if the given token has RW permissions, +// otherwise it returns false +func verifySASTokenWriteActivity(containerName string, id string, key string) bool { + _, _, err := run.Unchecked(fmt.Sprintf("az storage container create "+ + "--name %v --account-name %v "+ + "--sas-token %v", containerName, id, key)) + + return err == nil +} + +// CreateSASTokenCredentials generates Secrets for the Azure Blob Storage +func CreateSASTokenCredentials( + ctx context.Context, + crudClient client.Client, + namespace, id, key string, +) error { + // Adding 24 hours to the current time + date := time.Now().UTC().Add(time.Hour * 24) + // Creating date time format for az command + expiringDate := fmt.Sprintf("%v"+"-"+"%d"+"-"+"%v"+"T"+"%v"+":"+"%v"+"Z", + date.Year(), + date.Month(), + date.Day(), + date.Hour(), + date.Minute()) + + out, _, err := run.Run(fmt.Sprintf( + // SAS Token at Blob Container level does not currently work in Barman Cloud + // https://github.com/EnterpriseDB/barman/issues/388 + // we will use SAS Token at Storage Account level + // ( "az storage container generate-sas --account-name %v "+ + // "--name %v "+ + // "--https-only --permissions racwdl --auth-mode key --only-show-errors "+ + // "--expiry \"$(date -u -d \"+4 hours\" '+%%Y-%%m-%%dT%%H:%%MZ')\"", + // id, blobContainerName ) + "az storage account generate-sas --account-name %v "+ + "--https-only --permissions cdlruwap --account-key %v "+ + "--resource-types co --services b --expiry %v -o tsv", + id, key, expiringDate)) + if err != nil { + return err + } + SASTokenRW := strings.TrimRight(out, "\n") + + out, _, err = run.Run(fmt.Sprintf( + "az storage account generate-sas --account-name %v "+ + "--https-only --permissions lr --account-key %v "+ + "--resource-types co --services b --expiry %v -o tsv", + id, key, expiringDate)) + if err != nil { + return err + } + + SASTokenRO := strings.TrimRight(out, "\n") + isReadWrite := verifySASTokenWriteActivity("restore-cluster-sas", id, SASTokenRO) + if isReadWrite { + return fmt.Errorf("expected token to be read only") + } + + _, err = secrets.CreateObjectStorageSecret( + ctx, crudClient, + namespace, "backup-storage-creds-sas", + id, SASTokenRW, + ) + if err != nil { + return err + } + + _, err = secrets.CreateObjectStorageSecret(ctx, crudClient, + namespace, "restore-storage-creds-sas", + id, SASTokenRO, + ) + if err != nil { + return err + } + + return nil +} diff --git a/tests/utils/backups/backup.go b/tests/utils/backups/backup.go new file mode 100644 index 0000000000..70c3904aae --- /dev/null +++ b/tests/utils/backups/backup.go @@ -0,0 +1,424 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package backups + +import ( + "context" + "fmt" + "os" + + v1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + v2 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" +) + +// List gathers the current list of backups in a namespace +func List( + ctx context.Context, + crudClient client.Client, + namespace string, +) (*apiv1.BackupList, error) { + backupList := &apiv1.BackupList{} + err := crudClient.List( + ctx, backupList, client.InNamespace(namespace), + ) + return backupList, err +} + +// Create creates a Backup resource for a given cluster name +func Create( + ctx context.Context, + crudClient client.Client, + targetBackup apiv1.Backup, +) (*apiv1.Backup, error) { + obj, err := objects.Create(ctx, crudClient, &targetBackup) + if err != nil { + return nil, err + } + backup, ok := obj.(*apiv1.Backup) + if !ok { + return nil, fmt.Errorf("created object is not of Backup type: %T %v", obj, obj) + } + return backup, nil +} + +// GetVolumeSnapshot gets a VolumeSnapshot given name and namespace +func GetVolumeSnapshot( + ctx context.Context, + crudClient client.Client, + namespace, name string, +) (*v1.VolumeSnapshot, error) { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + volumeSnapshot := &v1.VolumeSnapshot{} + err := objects.Get(ctx, crudClient, namespacedName, volumeSnapshot) + if err != nil { + return nil, err + } + return volumeSnapshot, nil +} + +// AssertBackupConditionInClusterStatus checks that the backup condition in the Cluster's Status +// eventually returns true +func AssertBackupConditionInClusterStatus( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, +) { + ginkgo.By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { + gomega.Eventually(func() (string, error) { + getBackupCondition, err := GetConditionsInClusterStatus( + ctx, crudClient, + namespace, clusterName, + apiv1.ConditionBackup, + ) + if err != nil { + return "", err + } + return string(getBackupCondition.Status), nil + }, 300, 5).Should(gomega.BeEquivalentTo("True")) + }) +} + +// CreateOnDemandBackupViaKubectlPlugin uses the kubectl plugin to create a backup +func CreateOnDemandBackupViaKubectlPlugin( + namespace, + clusterName, + backupName string, + target apiv1.BackupTarget, + method apiv1.BackupMethod, +) error { + command := fmt.Sprintf("kubectl cnpg backup %v -n %v", clusterName, namespace) + + if backupName != "" { + command = 
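+ // For illustration (hypothetical names): a call with backupName "daily", target "primary" and method "barmanObjectStore" would assemble roughly "kubectl cnpg backup cluster-example -n default --backup-name daily --backup-target primary --method barmanObjectStore".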
fmt.Sprintf("%v --backup-name %v", command, backupName) + } + if target != "" { + command = fmt.Sprintf("%v --backup-target %v", command, target) + } + if method != "" { + command = fmt.Sprintf("%v --method %v", command, method) + } + + _, _, err := run.Run(command) + return err +} + +// GetConditionsInClusterStatus get conditions values as given type from cluster object status +func GetConditionsInClusterStatus( + ctx context.Context, + crudClient client.Client, + namespace, + clusterName string, + conditionType apiv1.ClusterConditionType, +) (*v2.Condition, error) { + var cluster *apiv1.Cluster + var err error + + cluster, err = clusterutils.Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return nil, err + } + + for _, cond := range cluster.Status.Conditions { + if cond.Type == string(conditionType) { + return &cond, nil + } + } + + return nil, fmt.Errorf("no condition matching requested type found: %v", conditionType) +} + +// Execute performs a backup and checks the backup status +func Execute( + ctx context.Context, + crudClient client.Client, + scheme *runtime.Scheme, + namespace, + backupFile string, + onlyTargetStandbys bool, + timeoutSeconds int, +) *apiv1.Backup { + backupName, err := yaml.GetResourceNameFromYAML(scheme, backupFile) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(func() error { + _, stderr, err := run.Unchecked("kubectl apply -n " + namespace + " -f " + backupFile) + if err != nil { + return fmt.Errorf("could not create backup.\nStdErr: %v\nError: %v", stderr, err) + } + return nil + }, 60, objects.PollingTime).Should(gomega.Succeed()) + backupNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: backupName, + } + backup := &apiv1.Backup{} + // Verifying backup status + gomega.Eventually(func() (apiv1.BackupPhase, error) { + err = crudClient.Get(ctx, backupNamespacedName, backup) + return backup.Status.Phase, err + }, timeoutSeconds).Should(gomega.BeEquivalentTo(apiv1.BackupPhaseCompleted)) + gomega.Eventually(func() (string, error) { + err = crudClient.Get(ctx, backupNamespacedName, backup) + if err != nil { + return "", err + } + backupStatus := backup.GetStatus() + return backupStatus.BeginLSN, err + }, timeoutSeconds).ShouldNot(gomega.BeEmpty()) + + var cluster *apiv1.Cluster + gomega.Eventually(func() error { + var err error + cluster, err = clusterutils.Get(ctx, crudClient, namespace, backup.Spec.Cluster.Name) + return err + }, timeoutSeconds).ShouldNot(gomega.HaveOccurred()) + + backupStatus := backup.GetStatus() + if cluster.Spec.Backup != nil { + backupTarget := cluster.Spec.Backup.Target + if backup.Spec.Target != "" { + backupTarget = backup.Spec.Target + } + switch backupTarget { + case apiv1.BackupTargetPrimary, "": + gomega.Expect(backupStatus.InstanceID.PodName).To(gomega.BeEquivalentTo(cluster.Status.TargetPrimary)) + case apiv1.BackupTargetStandby: + gomega.Expect(backupStatus.InstanceID.PodName).To(gomega.BeElementOf(cluster.Status.InstanceNames)) + if onlyTargetStandbys { + gomega.Expect(backupStatus.InstanceID.PodName).NotTo(gomega.Equal(cluster.Status.TargetPrimary)) + } + } + } + + gomega.Expect(backupStatus.BeginWal).NotTo(gomega.BeEmpty()) + gomega.Expect(backupStatus.EndLSN).NotTo(gomega.BeEmpty()) + gomega.Expect(backupStatus.EndWal).NotTo(gomega.BeEmpty()) + return backup +} + +// CreateClusterFromBackupUsingPITR creates a cluster from backup, using the PITR +func CreateClusterFromBackupUsingPITR( + ctx context.Context, + crudClient client.Client, + scheme *runtime.Scheme, + 
namespace, + clusterName, + backupFilePath, + targetTime string, +) (*apiv1.Cluster, error) { + backupName, err := yaml.GetResourceNameFromYAML(scheme, backupFilePath) + if err != nil { + return nil, err + } + storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + restoreCluster := &apiv1.Cluster{ + ObjectMeta: v2.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClassName, + }, + + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1s", + "log_replication_commands": "on", + }, + }, + + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Backup: &apiv1.BackupSource{ + LocalObjectReference: apiv1.LocalObjectReference{ + Name: backupName, + }, + }, + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTime: targetTime, + }, + }, + }, + }, + } + obj, err := objects.Create(ctx, crudClient, restoreCluster) + if err != nil { + return nil, err + } + cluster, ok := obj.(*apiv1.Cluster) + if !ok { + return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) + } + return cluster, nil +} + +// CreateClusterFromExternalClusterBackupWithPITROnMinio creates a cluster on Minio, starting from an external cluster +// backup with PITR +func CreateClusterFromExternalClusterBackupWithPITROnMinio( + ctx context.Context, + crudClient client.Client, + namespace, + externalClusterName, + sourceClusterName, + targetTime string, +) (*apiv1.Cluster, error) { + storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + + restoreCluster := &apiv1.Cluster{ + ObjectMeta: v2.ObjectMeta{ + Name: externalClusterName, + Namespace: namespace, + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClassName, + }, + + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1s", + "log_replication_commands": "on", + }, + }, + + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + Source: sourceClusterName, + RecoveryTarget: &apiv1.RecoveryTarget{ + TargetTime: targetTime, + }, + }, + }, + + ExternalClusters: []apiv1.ExternalCluster{ + { + Name: sourceClusterName, + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ + DestinationPath: "s3://cluster-backups/", + EndpointURL: "https://minio-service.minio:9000", + EndpointCA: &apiv1.SecretKeySelector{ + LocalObjectReference: apiv1.LocalObjectReference{ + Name: "minio-server-ca-secret", + }, + Key: "ca.crt", + }, + BarmanCredentials: apiv1.BarmanCredentials{ + AWS: &apiv1.S3Credentials{ + AccessKeyIDReference: &apiv1.SecretKeySelector{ + LocalObjectReference: apiv1.LocalObjectReference{ + Name: "backup-storage-creds", + }, + Key: "ID", + }, + SecretAccessKeyReference: &apiv1.SecretKeySelector{ + LocalObjectReference: apiv1.LocalObjectReference{ + Name: "backup-storage-creds", + }, + Key: "KEY", + }, + }, + }, + }, + }, + }, + }, + } + obj, err := objects.Create(ctx, crudClient, restoreCluster) + if err != nil { + return nil, err + } + cluster, ok 
:= obj.(*apiv1.Cluster) + if !ok { + return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) + } + return cluster, nil +} + +// CreateOnDemand creates a Backup resource for a given cluster name +// Deprecated: Use Create. +// TODO: eradicate +func CreateOnDemand( + ctx context.Context, + crudClient client.Client, + namespace, + clusterName, + backupName string, + target apiv1.BackupTarget, + method apiv1.BackupMethod, +) (*apiv1.Backup, error) { + targetBackup := &apiv1.Backup{ + ObjectMeta: v2.ObjectMeta{ + Name: backupName, + Namespace: namespace, + }, + Spec: apiv1.BackupSpec{ + Cluster: apiv1.LocalObjectReference{ + Name: clusterName, + }, + }, + } + + if target != "" { + targetBackup.Spec.Target = target + } + if method != "" { + targetBackup.Spec.Method = method + } + + obj, err := objects.Create(ctx, crudClient, targetBackup) + if err != nil { + return nil, err + } + backup, ok := obj.(*apiv1.Backup) + if !ok { + return nil, fmt.Errorf("created object is not of Backup type: %T %v", obj, obj) + } + return backup, nil +} diff --git a/pkg/conditions/doc.go b/tests/utils/backups/doc.go similarity index 72% rename from pkg/conditions/doc.go rename to tests/utils/backups/doc.go index acecc6fc10..2e04155498 100644 --- a/pkg/conditions/doc.go +++ b/tests/utils/backups/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,8 +13,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -// Package conditions contains functions useful to update the conditions -// on the resources managed by the operator -package conditions +// Package backups provides backup utilities +package backups diff --git a/tests/utils/certificates.go b/tests/utils/certificates.go deleted file mode 100644 index f6d08cdcad..0000000000 --- a/tests/utils/certificates.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "fmt" - - corev1 "k8s.io/api/core/v1" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// CreateClientCertificatesViaKubectlPlugin creates a certificate for a given user on a given cluster -func CreateClientCertificatesViaKubectlPlugin( - cluster apiv1.Cluster, - certName string, - userName string, - env *TestingEnvironment, -) error { - // clientCertName := "cluster-cert" - // user := "app" - // Create the certificate - _, _, err := Run(fmt.Sprintf( - "kubectl cnpg certificate %v --cnpg-cluster %v --cnpg-user %v -n %v", - certName, - cluster.Name, - userName, - cluster.Namespace)) - if err != nil { - return err - } - // Verifying client certificate secret existence - secret := &corev1.Secret{} - err = env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: cluster.Namespace, Name: certName}, secret) - return err -} diff --git a/tests/utils/cloud_vendor.go b/tests/utils/cloudvendors/cloud_vendor.go similarity index 90% rename from tests/utils/cloud_vendor.go rename to tests/utils/cloudvendors/cloud_vendor.go index 1f3062a15b..5619f228a8 100644 --- a/tests/utils/cloud_vendor.go +++ b/tests/utils/cloudvendors/cloud_vendor.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +// Package cloudvendors provides the variables to define on which cloud vendor the e2e test is running +package cloudvendors import ( "fmt" @@ -37,7 +41,7 @@ var EKS = TestEnvVendor("eks") // GKE google cloud cluster var GKE = TestEnvVendor("gke") -// LOCAL kind or k3d cluster running locally +// LOCAL kind cluster running locally var LOCAL = TestEnvVendor("local") // OCP openshift cloud cluster diff --git a/tests/utils/cluster.go b/tests/utils/cluster.go deleted file mode 100644 index de6a301c83..0000000000 --- a/tests/utils/cluster.go +++ /dev/null @@ -1,409 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "text/tabwriter" - - "github.com/cheynewallace/tabby" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// AllClusterPodsHaveLabels verifies if the labels defined in a map are included -// in all the pods of a cluster -func AllClusterPodsHaveLabels( - env *TestingEnvironment, - namespace, clusterName string, - labels map[string]string, -) (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return false, err - } - podList, err := env.GetClusterPodList(namespace, clusterName) - if err != nil { - return false, err - } - if len(podList.Items) != cluster.Spec.Instances { - return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances) - } - for _, pod := range podList.Items { - if !PodHasLabels(pod, labels) { - return false, fmt.Errorf("%v found labels, expected %v", pod.Labels, labels) - } - } - return true, nil -} - -// AllClusterPodsHaveAnnotations verifies if the annotations defined in a map are included -// in all the pods of a cluster -func AllClusterPodsHaveAnnotations( - env *TestingEnvironment, - namespace, clusterName string, - annotations map[string]string, -) (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return false, err - } - podList, err := env.GetClusterPodList(namespace, clusterName) - if err != nil { - return false, err - } - if len(podList.Items) != cluster.Spec.Instances { - return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances) - } - for _, pod := range podList.Items { - if !PodHasAnnotations(pod, annotations) { - return false, fmt.Errorf("%v found annotations, %v expected", pod.Annotations, annotations) - } - } - return true, nil -} - -// ClusterHasLabels verifies that the labels of a cluster contain a specified -// labels map -func ClusterHasLabels( - cluster *apiv1.Cluster, - labels map[string]string, -) bool { - clusterLabels := cluster.Labels - for k, v := range labels { - val, ok := clusterLabels[k] - if !ok || (v != val) { - return false - } - } - return true -} - -// ClusterHasAnnotations verifies that the annotations of a cluster contain a specified -// annotations map -func ClusterHasAnnotations( - cluster *apiv1.Cluster, - annotations map[string]string, -) bool { - clusterAnnotations := cluster.Annotations - for k, v := range annotations { - val, ok := clusterAnnotations[k] - if !ok || (v != val) { - return false - } - } - return true -} - -// DumpNamespaceObjects logs the clusters, pods, pvcs etc. 
found in a namespace as JSON sections -func (env TestingEnvironment) DumpNamespaceObjects(namespace string, filename string) { - f, err := os.Create(filepath.Clean(filename)) - if err != nil { - fmt.Println(err) - return - } - defer func() { - _ = f.Sync() - _ = f.Close() - }() - w := bufio.NewWriter(f) - clusterList := &apiv1.ClusterList{} - _ = GetObjectList(&env, clusterList, client.InNamespace(namespace)) - - for _, cluster := range clusterList.Items { - out, _ := json.MarshalIndent(cluster, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v cluster\n", namespace, cluster.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - podList, _ := env.GetPodList(namespace) - for _, pod := range podList.Items { - out, _ := json.MarshalIndent(pod, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - pvcList, _ := env.GetPVCList(namespace) - for _, pvc := range pvcList.Items { - out, _ := json.MarshalIndent(pvc, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v PVC\n", namespace, pvc.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - jobList, _ := env.GetJobList(namespace) - for _, job := range jobList.Items { - out, _ := json.MarshalIndent(job, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v job\n", namespace, job.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - eventList, _ := env.GetEventList(namespace) - out, _ := json.MarshalIndent(eventList.Items, "", " ") - _, _ = fmt.Fprintf(w, "Dumping events for namespace %v\n", namespace) - _, _ = fmt.Fprintln(w, string(out)) - - serviceAccountList, _ := env.GetServiceAccountList(namespace) - for _, sa := range serviceAccountList.Items { - out, _ := json.MarshalIndent(sa, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v serviceaccount\n", namespace, sa.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - suffixes := []string{"-r", "-rw", "-any"} - for _, cluster := range clusterList.Items { - for _, suffix := range suffixes { - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: cluster.Name + suffix, - } - endpoint := &corev1.Endpoints{} - _ = env.Client.Get(env.Ctx, namespacedName, endpoint) - out, _ := json.MarshalIndent(endpoint, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v endpoint\n", namespace, endpoint.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - } - // dump backup info - backupList, _ := env.GetBackupList(namespace) - // dump backup object info if it's configure - for _, backup := range backupList.Items { - out, _ := json.MarshalIndent(backup, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v backup\n", namespace, backup.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - // dump scheduledbackup info - scheduledBackupList, _ := env.GetScheduledBackupList(namespace) - // dump backup object info if it's configure - for _, scheduledBackup := range scheduledBackupList.Items { - out, _ := json.MarshalIndent(scheduledBackup, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v scheduledbackup\n", namespace, scheduledBackup.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - err = w.Flush() - if err != nil { - fmt.Println(err) - return - } -} - -// GetCluster gets a cluster given name and namespace -func (env TestingEnvironment) GetCluster(namespace string, name string) (*apiv1.Cluster, error) { - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: name, - } - cluster := &apiv1.Cluster{} - err := GetObject(&env, namespacedName, cluster) - if err != nil { - return nil, err - } - return cluster, nil -} - -// GetClusterPodList gathers the current list 
of instance pods for a cluster in a namespace -func (env TestingEnvironment) GetClusterPodList(namespace string, clusterName string) (*corev1.PodList, error) { - podList := &corev1.PodList{} - err := GetObjectList(&env, podList, client.InNamespace(namespace), - client.MatchingLabels{ - utils.ClusterLabelName: clusterName, - utils.PodRoleLabelName: "instance", // this ensures we are getting instance pods only - }, - ) - return podList, err -} - -// GetClusterPrimary gets the primary pod of a cluster -func (env TestingEnvironment) GetClusterPrimary(namespace string, clusterName string) (*corev1.Pod, error) { - podList := &corev1.PodList{} - - err := GetObjectList(&env, podList, client.InNamespace(namespace), - client.MatchingLabels{ - utils.ClusterLabelName: clusterName, - utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelPrimary, - }, - ) - if err != nil { - return &corev1.Pod{}, err - } - if len(podList.Items) > 0 { - // if there are multiple, get the one without deletion timestamp - for _, pod := range podList.Items { - if pod.DeletionTimestamp == nil { - return &pod, nil - } - } - err = fmt.Errorf("all pod with primary role has deletion timestamp") - return &(podList.Items[0]), err - } - err = fmt.Errorf("no primary found") - return &corev1.Pod{}, err -} - -// GetClusterReplicas gets a slice containing all the replica pods of a cluster -func (env TestingEnvironment) GetClusterReplicas(namespace string, clusterName string) (*corev1.PodList, error) { - podList := &corev1.PodList{} - err := GetObjectList(&env, podList, client.InNamespace(namespace), - client.MatchingLabels{ - utils.ClusterLabelName: clusterName, - utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelReplica, - }, - ) - if err != nil { - return podList, err - } - if len(podList.Items) > 0 { - return podList, nil - } - err = fmt.Errorf("no replicas found") - return podList, err -} - -// ScaleClusterSize scales a cluster to the requested size -func (env TestingEnvironment) ScaleClusterSize(namespace, clusterName string, newClusterSize int) error { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return err - } - originalCluster := cluster.DeepCopy() - cluster.Spec.Instances = newClusterSize - err = env.Client.Patch(env.Ctx, cluster, client.MergeFrom(originalCluster)) - if err != nil { - return err - } - return nil -} - -// PrintClusterResources prints a summary of the cluster pods, jobs, pvcs etc. 
-func PrintClusterResources(namespace, clusterName string, env *TestingEnvironment) string { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return fmt.Sprintf("Error while Getting Object %v", err) - } - - buffer := &bytes.Buffer{} - w := tabwriter.NewWriter(buffer, 0, 0, 4, ' ', 0) - clusterInfo := tabby.NewCustom(w) - clusterInfo.AddLine("Timeout while waiting for cluster ready, dumping more cluster information for analysis...") - clusterInfo.AddLine() - clusterInfo.AddLine() - clusterInfo.AddLine("Cluster information:") - clusterInfo.AddLine("Name", cluster.GetName()) - clusterInfo.AddLine("Namespace", cluster.GetNamespace()) - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - clusterInfo.AddLine("Spec.Instances", cluster.Spec.Instances) - clusterInfo.AddLine("Wal storage", cluster.ShouldCreateWalArchiveVolume()) - clusterInfo.AddLine("Cluster phase", cluster.Status.Phase) - clusterInfo.AddLine("Phase reason", cluster.Status.PhaseReason) - clusterInfo.AddLine("Cluster target primary", cluster.Status.TargetPrimary) - clusterInfo.AddLine("Cluster current primary", cluster.Status.CurrentPrimary) - clusterInfo.AddLine() - - podList, _ := env.GetClusterPodList(cluster.GetNamespace(), cluster.GetName()) - - clusterInfo.AddLine("Cluster Pods information:") - clusterInfo.AddLine("Ready pod number: ", utils.CountReadyPods(podList.Items)) - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - for _, pod := range podList.Items { - clusterInfo.AddLine("Pod name", pod.Name) - clusterInfo.AddLine("Pod phase", pod.Status.Phase) - if cluster.Status.InstancesReportedState != nil { - if instanceReportState, ok := cluster.Status.InstancesReportedState[apiv1.PodName(pod.Name)]; ok { - clusterInfo.AddLine("Is Primary", instanceReportState.IsPrimary) - clusterInfo.AddLine("TimeLineID", instanceReportState.TimeLineID) - clusterInfo.AddLine("---", "---") - } - } else { - clusterInfo.AddLine("InstanceReportState not reported", "") - } - } - - clusterInfo.AddLine("Jobs information:") - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - jobList, _ := env.GetJobList(cluster.GetNamespace()) - for _, job := range jobList.Items { - clusterInfo.AddLine("Job name", job.Name) - clusterInfo.AddLine("Job status", fmt.Sprintf("%#v", job.Status)) - } - - pvcList, _ := env.GetPVCList(cluster.GetNamespace()) - clusterInfo.AddLine() - clusterInfo.AddLine("Cluster PVC information: (dumping all pvc under the namespace)") - clusterInfo.AddLine("Available Cluster PVCCount", cluster.Status.PVCCount) - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - for _, pvc := range pvcList.Items { - clusterInfo.AddLine("PVC name", pvc.Name) - clusterInfo.AddLine("PVC phase", pvc.Status.Phase) - clusterInfo.AddLine("---", "---") - } - - snapshotList, _ := env.GetSnapshotList(cluster.Namespace) - clusterInfo.AddLine() - clusterInfo.AddLine("Cluster Snapshot information: (dumping all snapshot under the namespace)") - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - for _, snapshot := range snapshotList.Items { - clusterInfo.AddLine("Snapshot name", snapshot.Name) - if snapshot.Status.ReadyToUse != nil { - clusterInfo.AddLine("Snapshot ready to use", *snapshot.Status.ReadyToUse) - } else { - clusterInfo.AddLine("Snapshot ready to use", "false") - } - clusterInfo.AddLine("---", "---") - } - - // do not remove, this is needed to ensure that the writer cache is always flushed. 
- clusterInfo.Print() - - return buffer.String() -} - -// DescribeKubernetesNodes prints the `describe node` for each node in the -// kubernetes cluster -func (env TestingEnvironment) DescribeKubernetesNodes() (string, error) { - nodeList, err := env.GetNodeList() - if err != nil { - return "", err - } - var report strings.Builder - for _, node := range nodeList.Items { - command := fmt.Sprintf("kubectl describe node %v", node.Name) - stdout, _, err := Run(command) - if err != nil { - return "", err - } - report.WriteString("================================================\n") - report.WriteString(stdout) - report.WriteString("================================================\n") - } - return report.String(), nil -} diff --git a/tests/utils/clusterutils/cluster.go b/tests/utils/clusterutils/cluster.go new file mode 100644 index 0000000000..01ad4c421a --- /dev/null +++ b/tests/utils/clusterutils/cluster.go @@ -0,0 +1,230 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package clusterutils provides functions to handle cluster actions +package clusterutils + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" +) + +// AllPodsHaveLabels verifies if the labels defined in a map are included +// in all the pods of a cluster +func AllPodsHaveLabels( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, + labels map[string]string, +) (bool, error) { + cluster, err := Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return false, err + } + podList, err := ListPods(ctx, crudClient, namespace, clusterName) + if err != nil { + return false, err + } + if len(podList.Items) != cluster.Spec.Instances { + return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances) + } + for _, pod := range podList.Items { + if !pods.HasLabels(pod, labels) { + return false, fmt.Errorf("%v found labels, expected %v", pod.Labels, labels) + } + } + return true, nil +} + +// AllPodsHaveAnnotations verifies if the annotations defined in a map are included +// in all the pods of a cluster +func AllPodsHaveAnnotations( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, + annotations map[string]string, +) (bool, error) { + cluster, err := Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return false, err + } + podList, err := ListPods(ctx, crudClient, namespace, clusterName) + if err != nil { + return false, err + } + if len(podList.Items) != cluster.Spec.Instances { + return false, 
+			fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances)
+	}
+	for _, pod := range podList.Items {
+		if !pods.HasAnnotations(pod, annotations) {
+			return false, fmt.Errorf("%v found annotations, %v expected", pod.Annotations, annotations)
+		}
+	}
+	return true, nil
+}
+
+// HasLabels verifies that the labels of a cluster contain a specified
+// labels map
+func HasLabels(
+	cluster *apiv1.Cluster,
+	labels map[string]string,
+) bool {
+	clusterLabels := cluster.Labels
+	for k, v := range labels {
+		val, ok := clusterLabels[k]
+		if !ok || (v != val) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAnnotations verifies that the annotations of a cluster contain a specified
+// annotations map
+func HasAnnotations(
+	cluster *apiv1.Cluster,
+	annotations map[string]string,
+) bool {
+	clusterAnnotations := cluster.Annotations
+	for k, v := range annotations {
+		val, ok := clusterAnnotations[k]
+		if !ok || (v != val) {
+			return false
+		}
+	}
+	return true
+}
+
+// Get gets a cluster given name and namespace
+func Get(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, name string,
+) (*apiv1.Cluster, error) {
+	namespacedName := types.NamespacedName{
+		Namespace: namespace,
+		Name:      name,
+	}
+	cluster := &apiv1.Cluster{}
+	err := objects.Get(ctx, crudClient, namespacedName, cluster)
+	if err != nil {
+		return nil, err
+	}
+	return cluster, nil
+}
+
+// ListPods gathers the current list of instance pods for a cluster in a namespace
+func ListPods(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (*corev1.PodList, error) {
+	podList := &corev1.PodList{}
+	err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+		client.MatchingLabels{
+			utils.ClusterLabelName: clusterName,
+			utils.PodRoleLabelName: "instance", // this ensures we are getting instance pods only
+		},
+	)
+	return podList, err
+}
+
+// GetPrimary gets the primary pod of a cluster
+func GetPrimary(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (*corev1.Pod, error) {
+	podList := &corev1.PodList{}
+
+	err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+		client.MatchingLabels{
+			utils.ClusterLabelName:             clusterName,
+			utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelPrimary,
+		},
+	)
+	if err != nil {
+		return &corev1.Pod{}, err
+	}
+	if len(podList.Items) > 0 {
+		// if there are multiple, get the first one without a deletion timestamp
+		for _, pod := range podList.Items {
+			if pod.DeletionTimestamp == nil {
+				return &pod, nil
+			}
+		}
+		err = fmt.Errorf("all pods with the primary role have a deletion timestamp")
+		return &(podList.Items[0]), err
+	}
+	err = fmt.Errorf("no primary found")
+	return &corev1.Pod{}, err
+}
+
+// GetReplicas gets a slice containing all the replica pods of a cluster
+func GetReplicas(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (*corev1.PodList, error) {
+	podList := &corev1.PodList{}
+	err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+		client.MatchingLabels{
+			utils.ClusterLabelName:             clusterName,
+			utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelReplica,
+		},
+	)
+	if err != nil {
+		return podList, err
+	}
+	if len(podList.Items) > 0 {
+		return podList, nil
+	}
+	err = fmt.Errorf("no replicas found")
+	return podList, err
+}
+
+// ScaleSize scales a cluster to the requested size
+func ScaleSize(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace,
clusterName string, + newClusterSize int, +) error { + cluster, err := Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return err + } + originalCluster := cluster.DeepCopy() + cluster.Spec.Instances = newClusterSize + err = crudClient.Patch(ctx, cluster, client.MergeFrom(originalCluster)) + if err != nil { + return err + } + return nil +} diff --git a/tests/utils/commons.go b/tests/utils/commons.go deleted file mode 100644 index d3c77a667e..0000000000 --- a/tests/utils/commons.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "fmt" - "time" - - "github.com/avast/retry-go/v4" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// ForgeArchiveWalOnMinio instead of using `switchWalCmd` to generate a real WAL archive, directly forges a WAL archive -// file on Minio by copying and renaming an existing WAL archive file for the sake of more control of testing. To make -// sure the forged one won't be a real WAL archive, we let the sequence in newWALName to be big enough so that it can't -// be a real WAL archive name in an idle postgresql. -func ForgeArchiveWalOnMinio(namespace, clusterName, miniClientPodName, existingWALName, newWALName string) error { - // Forge a WAL archive by copying and renaming the 1st WAL archive - minioWALBasePath := "minio/" + clusterName + "/" + clusterName + "/wals/0000000100000000" - existingWALPath := minioWALBasePath + "/" + existingWALName + ".gz" - newWALNamePath := minioWALBasePath + "/" + newWALName - forgeWALOnMinioCmd := "mc cp " + existingWALPath + " " + newWALNamePath - _, _, err := RunUncheckedRetry(fmt.Sprintf( - "kubectl exec -n %v %v -- %v", - namespace, - miniClientPodName, - forgeWALOnMinioCmd)) - - return err -} - -// TestFileExist tests if a file specified with `fileName` exist under directory `directoryPath`, on pod `podName` in -// namespace `namespace` -func TestFileExist(namespace, podName, directoryPath, fileName string) bool { - filePath := directoryPath + "/" + fileName - testFileExistCommand := "test -f " + filePath - _, _, err := RunUnchecked(fmt.Sprintf( - "kubectl exec -n %v %v -- %v", - namespace, - podName, - testFileExistCommand)) - - return err == nil -} - -// TestDirectoryEmpty tests if a directory `directoryPath` exists on pod `podName` in namespace `namespace` -func TestDirectoryEmpty(namespace, podName, directoryPath string) bool { - testDirectoryEmptyCommand := "test \"$(ls -A" + directoryPath + ")\"" - _, _, err := RunUnchecked(fmt.Sprintf( - "kubectl exec -n %v %v -- %v", - namespace, - podName, - testDirectoryEmptyCommand)) - - return err == nil -} - -// CreateObject create object in the Kubernetes cluster -func CreateObject(env *TestingEnvironment, object client.Object, opts ...client.CreateOption) (client.Object, error) { - err := retry.Do( - func() error { - return env.Client.Create(env.Ctx, object, opts...) 
- }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), - retry.DelayType(retry.FixedDelay), - retry.RetryIf(func(err error) bool { return !apierrs.IsAlreadyExists(err) }), - ) - return object, err -} - -// DeleteObject delete object in the Kubernetes cluster -func DeleteObject(env *TestingEnvironment, object client.Object, opts ...client.DeleteOption) error { - err := retry.Do( - func() error { - return env.Client.Delete(env.Ctx, object, opts...) - }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), - retry.DelayType(retry.FixedDelay), - retry.RetryIf(func(err error) bool { return !apierrs.IsNotFound(err) }), - ) - return err -} - -// GetObjectList retrieves list of objects for a given namespace and list options -func GetObjectList(env *TestingEnvironment, objectList client.ObjectList, opts ...client.ListOption) error { - err := retry.Do( - func() error { - err := env.Client.List(env.Ctx, objectList, opts...) - if err != nil { - return err - } - return nil - }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), - retry.DelayType(retry.FixedDelay), - ) - return err -} - -// GetObject retrieves an objects for the given object key from the Kubernetes Cluster -func GetObject(env *TestingEnvironment, objectKey client.ObjectKey, object client.Object) error { - err := retry.Do( - func() error { - err := env.Client.Get(env.Ctx, objectKey, object) - if err != nil { - return err - } - return nil - }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), - retry.DelayType(retry.FixedDelay), - ) - return err -} diff --git a/tests/utils/deployment.go b/tests/utils/deployment.go deleted file mode 100644 index 31995afc7d..0000000000 --- a/tests/utils/deployment.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "fmt" - "time" - - "github.com/avast/retry-go/v4" - appsv1 "k8s.io/api/apps/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// DeploymentIsReady checks if a Deployment is ready -func DeploymentIsReady(deployment appsv1.Deployment) bool { - return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas -} - -// DeploymentWaitForReady waits for a Deployment to be ready -func DeploymentWaitForReady(env *TestingEnvironment, deployment *appsv1.Deployment, timeoutSeconds uint) error { - err := retry.Do( - func() error { - if err := env.Client.Get(env.Ctx, client.ObjectKey{ - Namespace: deployment.Namespace, - Name: deployment.Name, - }, deployment); err != nil { - return err - } - if !DeploymentIsReady(*deployment) { - return fmt.Errorf( - "deployment not ready. 
Namespace: %v, Name: %v", - deployment.Namespace, - deployment.Name, - ) - } - return nil - }, - retry.Attempts(timeoutSeconds), - retry.Delay(time.Second), - retry.DelayType(retry.FixedDelay), - ) - return err -} diff --git a/tests/utils/deployments/deployment.go b/tests/utils/deployments/deployment.go new file mode 100644 index 0000000000..ae05f1e4d6 --- /dev/null +++ b/tests/utils/deployments/deployment.go @@ -0,0 +1,90 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package deployments contains functions to control deployments +package deployments + +import ( + "context" + "fmt" + "time" + + "github.com/avast/retry-go/v4" + appsv1 "k8s.io/api/apps/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// IsReady checks if a Deployment is ready +func IsReady(deployment appsv1.Deployment) bool { + // If the deployment has been scaled down to 0 replicas, we consider it ready + if deployment.Status.Replicas == 0 && *deployment.Spec.Replicas == 0 { + return true + } + + if deployment.Status.ObservedGeneration < deployment.Generation || + deployment.Status.UpdatedReplicas < deployment.Status.Replicas || + deployment.Status.AvailableReplicas < deployment.Status.Replicas || + deployment.Status.ReadyReplicas < deployment.Status.Replicas { + return false + } + + if deployment.Status.Conditions == nil { + return false + } + for _, condition := range deployment.Status.Conditions { + if condition.Type == appsv1.DeploymentAvailable && condition.Status != "True" { + return false + } + if condition.Type == appsv1.DeploymentProgressing && condition.Status != "True" { + return false + } + } + return true +} + +// WaitForReady waits for a Deployment to be ready +func WaitForReady( + ctx context.Context, + crudClient client.Client, + deployment *appsv1.Deployment, + timeoutSeconds uint, +) error { + err := retry.Do( + func() error { + if err := crudClient.Get(ctx, client.ObjectKey{ + Namespace: deployment.Namespace, + Name: deployment.Name, + }, deployment); err != nil { + return err + } + if !IsReady(*deployment) { + return fmt.Errorf( + "deployment not ready. Namespace: %v, Name: %v", + deployment.Namespace, + deployment.Name, + ) + } + return nil + }, + retry.Attempts(timeoutSeconds), + retry.Delay(time.Second), + retry.DelayType(retry.FixedDelay), + ) + return err +} diff --git a/pkg/resources/instance/doc.go b/tests/utils/doc.go similarity index 71% rename from pkg/resources/instance/doc.go rename to tests/utils/doc.go index 975dc071f9..1ce7dbcf8a 100644 --- a/pkg/resources/instance/doc.go +++ b/tests/utils/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
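The new deployments.IsReady above deliberately short-circuits for Deployments scaled to zero, and otherwise requires the observed generation, the replica counters, and the Available/Progressing conditions to all agree before WaitForReady stops polling. A minimal sketch of the scale-to-zero path (the Deployment literal below is hypothetical, for illustration only):

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments"
    )

    func main() {
        replicas := int32(0)
        scaledDown := appsv1.Deployment{
            Spec:   appsv1.DeploymentSpec{Replicas: &replicas},
            Status: appsv1.DeploymentStatus{Replicas: 0},
        }
        // Spec.Replicas and Status.Replicas are both zero, so the Deployment
        // is considered ready without inspecting its conditions at all.
        fmt.Println(deployments.IsReady(scaledDown)) // true
    }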
@@ -12,7 +13,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -// Package instance contains the client capable of querying the HTTP instances endpoints -package instance +// Package utils contains helper functions/methods for e2e +package utils diff --git a/tests/utils/endpoints.go b/tests/utils/endpoints.go index 7699def778..66eabd4849 100644 --- a/tests/utils/endpoints.go +++ b/tests/utils/endpoints.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,19 +13,55 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils -import corev1 "k8s.io/api/core/v1" +import ( + "context" + "fmt" + + discoveryv1 "k8s.io/api/discovery/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) -// FirstEndpointIP returns the IP of first Address in the Endpoint -func FirstEndpointIP(endpoint *corev1.Endpoints) string { +// FirstEndpointSliceIP returns the IP of the first Address in the EndpointSlice +func FirstEndpointSliceIP(endpoint *discoveryv1.EndpointSlice) string { if endpoint == nil { return "" } - if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { + if len(endpoint.Endpoints) == 0 || len(endpoint.Endpoints[0].Addresses) == 0 { return "" } - return endpoint.Subsets[0].Addresses[0].IP + return endpoint.Endpoints[0].Addresses[0] +} + +// GetEndpointSliceByServiceName returns the EndpointSlice for a given service name in a given namespace +func GetEndpointSliceByServiceName( + ctx context.Context, + crudClient client.Client, + namespace, serviceName string, +) (*discoveryv1.EndpointSlice, error) { + endpointSliceList := &discoveryv1.EndpointSliceList{} + + if err := crudClient.List( + ctx, + endpointSliceList, + client.InNamespace(namespace), + client.MatchingLabels{"kubernetes.io/service-name": serviceName}, + ); err != nil { + return nil, err + } + + if len(endpointSliceList.Items) == 0 { + return nil, fmt.Errorf("no endpointslice found for service %s in namespace %s", serviceName, namespace) + } + + if len(endpointSliceList.Items) > 1 { + return nil, fmt.Errorf("multiple endpointslice found for service %s in namespace %s", serviceName, namespace) + } + + return &endpointSliceList.Items[0], nil } diff --git a/tests/utils/environment.go b/tests/utils/environment.go deleted file mode 100644 index ea9cefe4bb..0000000000 --- a/tests/utils/environment.go +++ /dev/null @@ -1,325 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/go-logr/logr" - storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "github.com/thoas/go-funk" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - eventsv1 "k8s.io/api/events/v1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/utils/strings/slices" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" - - // Import the client auth plugin package to allow use gke or ake to run tests - _ "k8s.io/client-go/plugin/pkg/client/auth" - - . "github.com/onsi/gomega" // nolint -) - -const ( - // RetryTimeout retry timeout (in seconds) when a client api call or kubectl cli request get failed - RetryTimeout = 60 - // RetryAttempts maximum number of attempts when it fails in `retry`. Mainly used in `RunUncheckedRetry` - RetryAttempts = 5 - // PollingTime polling interval (in seconds) between retries - PollingTime = 5 - // sternLogDirectory contains the fixed path to store the cluster logs - sternLogDirectory = "cluster_logs/" -) - -// TestingEnvironment struct for operator testing -type TestingEnvironment struct { - RestClientConfig *rest.Config - Client client.Client - Interface kubernetes.Interface - APIExtensionClient apiextensionsclientset.Interface - Ctx context.Context - Scheme *runtime.Scheme - PreserveNamespaces []string - Log logr.Logger - PostgresVersion int - createdNamespaces *uniqueStringSlice - AzureConfiguration AzureConfiguration - SternLogDir string -} - -type uniqueStringSlice struct { - values []string - mu sync.RWMutex -} - -func (a *uniqueStringSlice) generateUniqueName(prefix string) string { - a.mu.Lock() - defer a.mu.Unlock() - - for { - potentialUniqueName := fmt.Sprintf("%s-%d", prefix, funk.RandomInt(0, 9999)) - if !slices.Contains(a.values, potentialUniqueName) { - a.values = append(a.values, potentialUniqueName) - return potentialUniqueName - } - } -} - -// NewTestingEnvironment creates the environment for testing -func NewTestingEnvironment() (*TestingEnvironment, error) { - var env TestingEnvironment - var err error - env.RestClientConfig = ctrl.GetConfigOrDie() - env.Interface = kubernetes.NewForConfigOrDie(env.RestClientConfig) - env.APIExtensionClient = apiextensionsclientset.NewForConfigOrDie(env.RestClientConfig) - env.Ctx = context.Background() - env.Scheme = runtime.NewScheme() - env.SternLogDir = sternLogDirectory - - if err := storagesnapshotv1.AddToScheme(env.Scheme); err != nil { - return nil, err - } - - if err := monitoringv1.AddToScheme(env.Scheme); err != nil { - return nil, err - } - - flags := log.NewFlags(zap.Options{ - 
Development: true, - }) - log.SetLogLevel(log.DebugLevelString) - flags.ConfigureLogging() - env.Log = log.GetLogger().WithName("e2e").GetLogger() - log.SetLogger(env.Log) - - env.createdNamespaces = &uniqueStringSlice{} - - postgresImage := versions.DefaultImageName - - // Fetching postgres image version. - if postgresImageFromUser, exist := os.LookupEnv("POSTGRES_IMG"); exist { - postgresImage = postgresImageFromUser - } - imageReference := utils.NewReference(postgresImage) - postgresImageVersion, err := postgres.GetPostgresVersionFromTag(imageReference.Tag) - if err != nil { - return nil, err - } - env.PostgresVersion = postgresImageVersion / 10000 - - env.Client, err = client.New(env.RestClientConfig, client.Options{Scheme: env.Scheme}) - if err != nil { - return nil, err - } - - if preserveNamespaces := os.Getenv("PRESERVE_NAMESPACES"); preserveNamespaces != "" { - env.PreserveNamespaces = strings.Fields(preserveNamespaces) - } - - clientDiscovery, err := utils.GetDiscoveryClient() - if err != nil { - return nil, fmt.Errorf("could not get the discovery client: %w", err) - } - - err = utils.DetectSecurityContextConstraints(clientDiscovery) - if err != nil { - return nil, fmt.Errorf("could not detect SeccompProfile support: %w", err) - } - - env.AzureConfiguration = newAzureConfigurationFromEnv() - - return &env, nil -} - -// EventuallyExecCommand wraps the utils.ExecCommand pre-setting values constant during -// tests, wrapping it with an Eventually clause -func (env TestingEnvironment) EventuallyExecCommand( - ctx context.Context, - pod corev1.Pod, - containerName string, - timeout *time.Duration, - command ...string, -) (string, string, error) { - var stdOut, stdErr string - var err error - Eventually(func() error { - stdOut, stdErr, err = utils.ExecCommand(ctx, env.Interface, env.RestClientConfig, - pod, containerName, timeout, command...) - if err != nil { - return err - } - return nil - }, RetryTimeout, PollingTime).Should(BeNil()) - return stdOut, stdErr, err -} - -// ExecCommand wraps the utils.ExecCommand pre-setting values constant during -// tests -func (env TestingEnvironment) ExecCommand( - ctx context.Context, - pod corev1.Pod, - containerName string, - timeout *time.Duration, - command ...string, -) (string, string, error) { - return utils.ExecCommand(ctx, env.Interface, env.RestClientConfig, - pod, containerName, timeout, command...) -} - -// ExecCommandWithPsqlClient wraps the utils.ExecCommand pre-setting values and -// run query on psql client pod with rw service as host. 
-func (env TestingEnvironment) ExecCommandWithPsqlClient( - namespace, - clusterName string, - pod *corev1.Pod, - secretSuffix string, - dbname string, - query string, -) (string, string, error) { - timeout := time.Second * 10 - username, password, err := GetCredentials(clusterName, namespace, secretSuffix, &env) - if err != nil { - return "", "", err - } - rwService, err := GetRwServiceObject(namespace, clusterName, &env) - if err != nil { - return "", "", err - } - host := CreateServiceFQDN(namespace, rwService.GetName()) - dsn := CreateDSN(host, username, dbname, password, Prefer, 5432) - return utils.ExecCommand(env.Ctx, env.Interface, env.RestClientConfig, - *pod, specs.PostgresContainerName, &timeout, "psql", dsn, "-tAc", query) -} - -// GetPVCList gathers the current list of PVCs in a namespace -func (env TestingEnvironment) GetPVCList(namespace string) (*corev1.PersistentVolumeClaimList, error) { - pvcList := &corev1.PersistentVolumeClaimList{} - err := env.Client.List( - env.Ctx, pvcList, client.InNamespace(namespace), - ) - return pvcList, err -} - -// GetSnapshotList gathers the current list of VolumeSnapshots in a namespace -func (env TestingEnvironment) GetSnapshotList(namespace string) (*storagesnapshotv1.VolumeSnapshotList, error) { - list := &storagesnapshotv1.VolumeSnapshotList{} - err := env.Client.List(env.Ctx, list, client.InNamespace(namespace)) - - return list, err -} - -// GetJobList gathers the current list of jobs in a namespace -func (env TestingEnvironment) GetJobList(namespace string) (*batchv1.JobList, error) { - jobList := &batchv1.JobList{} - err := env.Client.List( - env.Ctx, jobList, client.InNamespace(namespace), - ) - return jobList, err -} - -// GetServiceAccountList gathers the current list of jobs in a namespace -func (env TestingEnvironment) GetServiceAccountList(namespace string) (*corev1.ServiceAccountList, error) { - serviceAccountList := &corev1.ServiceAccountList{} - err := env.Client.List( - env.Ctx, serviceAccountList, client.InNamespace(namespace), - ) - return serviceAccountList, err -} - -// GetEventList gathers the current list of events in a namespace -func (env TestingEnvironment) GetEventList(namespace string) (*eventsv1.EventList, error) { - eventList := &eventsv1.EventList{} - err := env.Client.List( - env.Ctx, eventList, client.InNamespace(namespace), - ) - return eventList, err -} - -// GetNodeList gathers the current list of Nodes -func (env TestingEnvironment) GetNodeList() (*corev1.NodeList, error) { - nodeList := &corev1.NodeList{} - err := env.Client.List(env.Ctx, nodeList, client.InNamespace("")) - return nodeList, err -} - -// GetBackupList gathers the current list of backup in namespace -func (env TestingEnvironment) GetBackupList(namespace string) (*apiv1.BackupList, error) { - backupList := &apiv1.BackupList{} - err := env.Client.List( - env.Ctx, backupList, client.InNamespace(namespace), - ) - return backupList, err -} - -// GetScheduledBackupList gathers the current list of scheduledBackup in namespace -func (env TestingEnvironment) GetScheduledBackupList(namespace string) (*apiv1.ScheduledBackupList, error) { - scheduledBackupList := &apiv1.ScheduledBackupList{} - err := env.Client.List( - env.Ctx, scheduledBackupList, client.InNamespace(namespace), - ) - return scheduledBackupList, err -} - -// GetResourceNamespacedNameFromYAML returns the NamespacedName representing a resource in a YAML file -func (env TestingEnvironment) GetResourceNamespacedNameFromYAML(path string) (types.NamespacedName, error) { - data, err := 
os.ReadFile(filepath.Clean(path)) - if err != nil { - return types.NamespacedName{}, err - } - decoder := serializer.NewCodecFactory(env.Scheme).UniversalDeserializer() - obj, _, err := decoder.Decode(data, nil, nil) - if err != nil { - return types.NamespacedName{}, err - } - objectMeta, err := meta.Accessor(obj) - if err != nil { - return types.NamespacedName{}, err - } - return types.NamespacedName{Namespace: objectMeta.GetNamespace(), Name: objectMeta.GetName()}, nil -} - -// GetResourceNameFromYAML returns the name of a resource in a YAML file -func (env TestingEnvironment) GetResourceNameFromYAML(path string) (string, error) { - namespacedName, err := env.GetResourceNamespacedNameFromYAML(path) - if err != nil { - return "", err - } - return namespacedName.Name, err -} diff --git a/tests/utils/environment/doc.go b/tests/utils/environment/doc.go new file mode 100644 index 0000000000..c0e5f380b2 --- /dev/null +++ b/tests/utils/environment/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package environment contains functions to handle the TestingEnvironment struct +package environment diff --git a/tests/utils/environment/environment.go b/tests/utils/environment/environment.go new file mode 100644 index 0000000000..f66a4944c7 --- /dev/null +++ b/tests/utils/environment/environment.go @@ -0,0 +1,188 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
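The removed GetResourceNamespacedNameFromYAML helper above captures a small but useful pattern: decode a manifest with a scheme-aware deserializer, then read the metadata through meta.Accessor without knowing the concrete type. A self-contained sketch of the same pattern, assuming the stock client-go scheme and an illustrative Pod manifest:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/meta"
        "k8s.io/apimachinery/pkg/runtime/serializer"
        clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    )

    func main() {
        // Hypothetical manifest, standing in for a fixture file on disk.
        manifest := []byte(`
    apiVersion: v1
    kind: Pod
    metadata:
      name: cluster-example-1
      namespace: default
    `)
        decoder := serializer.NewCodecFactory(clientgoscheme.Scheme).UniversalDeserializer()
        obj, _, err := decoder.Decode(manifest, nil, nil)
        if err != nil {
            panic(err)
        }
        // meta.Accessor exposes the object metadata generically.
        objectMeta, err := meta.Accessor(obj)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s/%s\n", objectMeta.GetNamespace(), objectMeta.GetName()) // default/cluster-example-1
    }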
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package environment
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/cloudnative-pg/machinery/pkg/image/reference"
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"github.com/cloudnative-pg/machinery/pkg/postgres/version"
+	"github.com/go-logr/logr"
+	storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	"github.com/thoas/go-funk"
+	corev1 "k8s.io/api/core/v1"
+	apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/utils/strings/slices"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+
+	// Import the client auth plugin package to allow using GKE or AKS to run tests
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
+
+	. "github.com/onsi/ginkgo/v2" // nolint
+	. "github.com/onsi/gomega" // nolint
+)
+
+const (
+	// RetryTimeout is the retry timeout (in seconds) when a client API call or kubectl CLI request fails
+	RetryTimeout = 60
+)
+
+// TestingEnvironment struct for operator testing
+type TestingEnvironment struct {
+	RestClientConfig   *rest.Config
+	Client             client.Client
+	Interface          kubernetes.Interface
+	APIExtensionClient apiextensionsclientset.Interface
+	Ctx                context.Context
+	Scheme             *runtime.Scheme
+	Log                logr.Logger
+	PostgresVersion    uint64
+	createdNamespaces  *uniqueStringSlice
+}
+
+type uniqueStringSlice struct {
+	values []string
+	mu     sync.RWMutex
+}
+
+func (a *uniqueStringSlice) generateUniqueName(prefix string) string {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	process := GinkgoParallelProcess()
+
+	for {
+		potentialUniqueName := fmt.Sprintf("%s-%d-%d", prefix, process, funk.RandomInt(0, 9999))
+		if !slices.Contains(a.values, potentialUniqueName) {
+			a.values = append(a.values, potentialUniqueName)
+			return potentialUniqueName
+		}
+	}
+}
+
+// NewTestingEnvironment creates the environment for testing
+func NewTestingEnvironment() (*TestingEnvironment, error) {
+	var env TestingEnvironment
+	var err error
+	env.RestClientConfig = ctrl.GetConfigOrDie()
+	env.Interface = kubernetes.NewForConfigOrDie(env.RestClientConfig)
+	env.APIExtensionClient = apiextensionsclientset.NewForConfigOrDie(env.RestClientConfig)
+	env.Ctx = context.Background()
+	env.Scheme = runtime.NewScheme()
+
+	if err := storagesnapshotv1.AddToScheme(env.Scheme); err != nil {
+		return nil, err
+	}
+
+	if err := monitoringv1.AddToScheme(env.Scheme); err != nil {
+		return nil, err
+	}
+
+	flags := log.NewFlags(zap.Options{
+		Development: true,
+	})
+	log.SetLogLevel(log.DebugLevelString)
+	flags.ConfigureLogging()
+	env.Log = log.GetLogger().WithName("e2e").GetLogger()
+	log.SetLogger(env.Log)
+
+	env.createdNamespaces = &uniqueStringSlice{}
+
+	postgresImage := versions.DefaultImageName
+
+	// Fetching postgres image version.
+ if postgresImageFromUser, exist := os.LookupEnv("POSTGRES_IMG"); exist { + postgresImage = postgresImageFromUser + } + imageReference := reference.New(postgresImage) + postgresImageVersion, err := version.FromTag(imageReference.Tag) + if err != nil { + return nil, err + } + env.PostgresVersion = postgresImageVersion.Major() + + env.Client, err = client.New(env.RestClientConfig, client.Options{Scheme: env.Scheme}) + if err != nil { + return nil, err + } + + clientDiscovery, err := utils.GetDiscoveryClient() + if err != nil { + return nil, fmt.Errorf("could not get the discovery client: %w", err) + } + + err = utils.DetectSecurityContextConstraints(clientDiscovery) + if err != nil { + return nil, fmt.Errorf("could not detect SeccompProfile support: %w", err) + } + + return &env, nil +} + +// EventuallyExecCommand wraps the utils.ExecCommand pre-setting values constant during +// tests, wrapping it with an Eventually clause +func (env TestingEnvironment) EventuallyExecCommand( + ctx context.Context, + pod corev1.Pod, + containerName string, + timeout *time.Duration, + command ...string, +) (string, string, error) { + var stdOut, stdErr string + var err error + Eventually(func() error { + stdOut, stdErr, err = utils.ExecCommand(ctx, env.Interface, env.RestClientConfig, + pod, containerName, timeout, command...) + if err != nil { + return err + } + return nil + }, RetryTimeout, objects.PollingTime).Should(Succeed()) + return stdOut, stdErr, err +} + +// CreateUniqueTestNamespace creates a namespace by using the passed prefix. +// Return the namespace name and any errors encountered. +// The namespace is automatically cleaned up at the end of the test. +func (env TestingEnvironment) CreateUniqueTestNamespace( + ctx context.Context, + crudClient client.Client, + namespacePrefix string, + opts ...client.CreateOption, +) (string, error) { + name := env.createdNamespaces.generateUniqueName(namespacePrefix) + + return name, namespaces.CreateTestNamespace(ctx, crudClient, name, opts...) +} diff --git a/tests/utils/namespace_test.go b/tests/utils/environment/environment_test.go similarity index 89% rename from tests/utils/namespace_test.go rename to tests/utils/environment/environment_test.go index 2919a8f915..ab4a341dc2 100644 --- a/tests/utils/namespace_test.go +++ b/tests/utils/environment/environment_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +package environment import ( . "github.com/onsi/ginkgo/v2" diff --git a/tests/utils/environment/suite_test.go b/tests/utils/environment/suite_test.go new file mode 100644 index 0000000000..aea6bf454d --- /dev/null +++ b/tests/utils/environment/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package environment
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+func TestUtils(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Utils test environment suite")
+}
diff --git a/tests/utils/envsubst/doc.go b/tests/utils/envsubst/doc.go
new file mode 100644
index 0000000000..c1bc024bdc
--- /dev/null
+++ b/tests/utils/envsubst/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package envsubst manages the replacement of environment variables in a file
+package envsubst
diff --git a/tests/utils/envsubst.go b/tests/utils/envsubst/envsubst.go
similarity index 93%
rename from tests/utils/envsubst.go
rename to tests/utils/envsubst/envsubst.go
index 74e23f77a8..a9233ac824 100644
--- a/tests/utils/envsubst.go
+++ b/tests/utils/envsubst/envsubst.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package utils
+package envsubst
 
 import (
 	"bytes"
diff --git a/tests/utils/envsubst_test.go b/tests/utils/envsubst/envsubst_test.go
similarity index 91%
rename from tests/utils/envsubst_test.go
rename to tests/utils/envsubst/envsubst_test.go
index 1557a2dccf..6b2cdc4a5b 100644
--- a/tests/utils/envsubst_test.go
+++ b/tests/utils/envsubst/envsubst_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
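The envsubst package moved above keeps its implementation out of this hunk (93% similarity), so its exact API is not shown here. As a hedged illustration of the underlying technique, shell-style variable expansion over a fixture template can be done with the standard library alone; the template and variable below are made up for the example:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // Hypothetical fixture template; ${POSTGRES_IMG} mirrors the kind of
        // environment variable the e2e fixtures substitute before applying.
        template := "image: ${POSTGRES_IMG}"
        _ = os.Setenv("POSTGRES_IMG", "ghcr.io/cloudnative-pg/postgresql:17")
        // os.Expand replaces ${VAR} occurrences using the mapping function.
        expanded := os.Expand(template, os.Getenv)
        fmt.Println(expanded) // image: ghcr.io/cloudnative-pg/postgresql:17
    }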
+ +SPDX-License-Identifier: Apache-2.0 */ -package utils +package envsubst import ( "errors" diff --git a/tests/utils/envsubst/suite_test.go b/tests/utils/envsubst/suite_test.go new file mode 100644 index 0000000000..9a0bcee5b0 --- /dev/null +++ b/tests/utils/envsubst/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package envsubst + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestUtils(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Utils test envsubst suite") +} diff --git a/tests/utils/exec/exec.go b/tests/utils/exec/exec.go new file mode 100644 index 0000000000..789814738d --- /dev/null +++ b/tests/utils/exec/exec.go @@ -0,0 +1,174 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package exec provides functions to execute commands inside pods or from local +package exec + +import ( + "context" + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + + . "github.com/onsi/gomega" // nolint +) + +// ContainerLocator contains the necessary data to find a container on a pod +type ContainerLocator struct { + Namespace string + PodName string + ContainerName string +} + +// CommandInContainer executes commands in a given instance pod, in the +// postgres container +func CommandInContainer( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + container ContainerLocator, + timeout *time.Duration, + command ...string, +) (string, string, error) { + wrapErr := func(err error) error { + return fmt.Errorf("while executing command in pod '%s/%s': %w", + container.Namespace, container.PodName, err) + } + pod, err := pods.Get(ctx, crudClient, container.Namespace, container.PodName) + if err != nil { + return "", "", wrapErr(err) + } + if !pkgutils.IsPodReady(*pod) { + return "", "", fmt.Errorf("pod not ready. 
Namespace: %v, Name: %v", pod.Namespace, pod.Name) + } + return Command(ctx, kubeInterface, restConfig, *pod, container.ContainerName, timeout, command...) +} + +// PodLocator contains the necessary data to find a pod +type PodLocator struct { + Namespace string + PodName string +} + +// CommandInInstancePod executes commands in a given instance pod, in the +// postgres container +func CommandInInstancePod( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + podLocator PodLocator, + timeout *time.Duration, + command ...string, +) (string, string, error) { + return CommandInContainer( + ctx, crudClient, kubeInterface, restConfig, + ContainerLocator{ + Namespace: podLocator.Namespace, + PodName: podLocator.PodName, + ContainerName: specs.PostgresContainerName, + }, timeout, command...) +} + +// DatabaseName is a special type for the database argument in an Exec call +type DatabaseName string + +// QueryInInstancePod executes a query in an instance pod, by connecting to the pod +// and the postgres container, and using a local connection with the postgres user +func QueryInInstancePod( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + podLocator PodLocator, + dbname DatabaseName, + query string, +) (string, string, error) { + timeout := time.Second * 10 + return QueryInInstancePodWithTimeout(ctx, crudClient, kubeInterface, restConfig, podLocator, dbname, query, timeout) +} + +// QueryInInstancePodWithTimeout executes a query in an instance pod, by connecting to the pod +// and the postgres container, and using a local connection with the postgres user +func QueryInInstancePodWithTimeout( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + podLocator PodLocator, + dbname DatabaseName, + query string, + timeout time.Duration, +) (string, string, error) { + return CommandInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + PodLocator{ + Namespace: podLocator.Namespace, + PodName: podLocator.PodName, + }, &timeout, "psql", "-U", "postgres", string(dbname), "-tAc", query) +} + +// EventuallyExecQueryInInstancePod wraps QueryInInstancePod with an Eventually clause +func EventuallyExecQueryInInstancePod( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + podLocator PodLocator, + dbname DatabaseName, + query string, + retryTimeout int, + pollingTime int, +) (string, string, error) { + var stdOut, stdErr string + var err error + + Eventually(func() error { + stdOut, stdErr, err = QueryInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + podLocator, dbname, query) + return err + }, retryTimeout, pollingTime).Should(Succeed()) + + return stdOut, stdErr, err +} + +// Command wraps the utils.ExecCommand pre-setting values constant during +// tests +func Command( + ctx context.Context, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + pod v1.Pod, + containerName string, + timeout *time.Duration, + command ...string, +) (string, string, error) { + return pkgutils.ExecCommand(ctx, kubeInterface, restConfig, + pod, containerName, timeout, command...) 
+} diff --git a/tests/utils/fence.go b/tests/utils/fencing/fencing.go similarity index 57% rename from tests/utils/fence.go rename to tests/utils/fencing/fencing.go index bbbc52491d..edbdf34286 100644 --- a/tests/utils/fence.go +++ b/tests/utils/fencing/fencing.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,49 +13,56 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +// Package fencing provides functions to manage the fencing on cnpg clusters +package fencing import ( + "context" "fmt" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" ) -// FencingMethod will be one of the supported ways to trigger an instance fencing -type FencingMethod string +// Method will be one of the supported ways to trigger an instance fencing +type Method string const ( // UsingAnnotation it is a keyword to use while fencing on/off the instances using annotation method - UsingAnnotation FencingMethod = "annotation" + UsingAnnotation Method = "annotation" // UsingPlugin it is a keyword to use while fencing on/off the instances using plugin method - UsingPlugin FencingMethod = "plugin" + UsingPlugin Method = "plugin" ) -// FencingOn marks an instance in a cluster as fenced -func FencingOn( - env *TestingEnvironment, +// On marks an instance in a cluster as fenced +func On( + ctx context.Context, + crudClient client.Client, serverName, namespace, clusterName string, - fencingMethod FencingMethod, + fencingMethod Method, ) error { switch fencingMethod { case UsingPlugin: - _, _, err := Run(fmt.Sprintf("kubectl cnpg fencing on %v %v -n %v", + _, _, err := run.Run(fmt.Sprintf("kubectl cnpg fencing on %v %v -n %v", clusterName, serverName, namespace)) if err != nil { return err } case UsingAnnotation: - err := utils.NewFencingMetadataExecutor(env.Client). + err := utils.NewFencingMetadataExecutor(crudClient). AddFencing(). ForInstance(serverName). - Execute(env.Ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{}) + Execute(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{}) if err != nil { return err } @@ -64,26 +72,27 @@ func FencingOn( return nil } -// FencingOff marks an instance in a cluster as not fenced -func FencingOff( - env *TestingEnvironment, +// Off marks an instance in a cluster as not fenced +func Off( + ctx context.Context, + crudClient client.Client, serverName, namespace, clusterName string, - fencingMethod FencingMethod, + fencingMethod Method, ) error { switch fencingMethod { case UsingPlugin: - _, _, err := Run(fmt.Sprintf("kubectl cnpg fencing off %v %v -n %v", + _, _, err := run.Run(fmt.Sprintf("kubectl cnpg fencing off %v %v -n %v", clusterName, serverName, namespace)) if err != nil { return err } case UsingAnnotation: - err := utils.NewFencingMetadataExecutor(env.Client). + err := utils.NewFencingMetadataExecutor(crudClient). 
RemoveFencing(). ForInstance(serverName). - Execute(env.Ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{}) + Execute(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{}) if err != nil { return err } diff --git a/tests/utils/forwardconnection/doc.go b/tests/utils/forwardconnection/doc.go new file mode 100644 index 0000000000..5292443626 --- /dev/null +++ b/tests/utils/forwardconnection/doc.go @@ -0,0 +1,23 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package forwardconnection provides an easy interface to create +// a port forward from the local test to a service or pod +// inside the testing k8s cluster +package forwardconnection diff --git a/tests/utils/forwardconnection/forwardconnection.go b/tests/utils/forwardconnection/forwardconnection.go new file mode 100644 index 0000000000..8ac8441941 --- /dev/null +++ b/tests/utils/forwardconnection/forwardconnection.go @@ -0,0 +1,224 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
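The forwardconnection package introduced below wires together a dialer, a port map, and a client-go PortForwarder; the typical call sequence is NewDialerFromService, NewForwardConnection, StartAndWait, then GetLocalPort. A usage sketch, assuming an already configured kubernetes.Interface and rest.Config (the package name and service coordinates are illustrative):

    package e2esketch // hypothetical test helper package

    import (
        "context"
        "os"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/forwardconnection"
    )

    // openForward port-forwards the -rw service of a cluster and returns the
    // locally assigned port (the "0:<port>" mapping requests auto-assignment).
    func openForward(
        ctx context.Context,
        kubeInterface kubernetes.Interface,
        config *rest.Config,
    ) (string, error) {
        dialer, portMaps, err := forwardconnection.NewDialerFromService(
            ctx, kubeInterface, config, "default", "cluster-example-rw")
        if err != nil {
            return "", err
        }
        fc, err := forwardconnection.NewForwardConnection(dialer, portMaps, os.Stdout, os.Stderr)
        if err != nil {
            return "", err
        }
        // StartAndWait returns once the forwarder reports readiness.
        if err := fc.StartAndWait(); err != nil {
            return "", err
        }
        return fc.GetLocalPort()
    }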
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package forwardconnection
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+
+	"github.com/onsi/ginkgo/v2"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/portforward"
+	"k8s.io/client-go/transport/spdy"
+)
+
+// PostgresPortMap is the default port map for the PostgreSQL Pod
+const PostgresPortMap = "0:5432"
+
+// ForwardConnection holds the necessary information to manage a port-forward
+// against a service or pod inside Kubernetes
+type ForwardConnection struct {
+	Forwarder    *portforward.PortForwarder
+	stopChannel  chan struct{}
+	readyChannel chan struct{}
+}
+
+// NewDialerFromService returns a Dialer against the service specified
+func NewDialerFromService(
+	ctx context.Context,
+	kubeInterface kubernetes.Interface,
+	config *rest.Config,
+	namespace,
+	service string,
+) (dialer httpstream.Dialer, portMaps []string, err error) {
+	pod, portMap, err := getPodAndPortsFromService(ctx, kubeInterface, namespace, service)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	dial, err := NewDialer(kubeInterface, config, namespace, pod)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return dial, portMap, nil
+}
+
+// NewForwardConnection returns a PortForwarder against the pod specified
+func NewForwardConnection(
+	dialer httpstream.Dialer,
+	portMaps []string,
+	outWriter,
+	errWriter io.Writer,
+) (*ForwardConnection, error) {
+	fc := &ForwardConnection{
+		stopChannel:  make(chan struct{}),
+		readyChannel: make(chan struct{}, 1),
+	}
+
+	var err error
+	fc.Forwarder, err = portforward.New(
+		dialer,
+		portMaps,
+		fc.stopChannel,
+		fc.readyChannel,
+		outWriter,
+		errWriter,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return fc, nil
+}
+
+// NewDialer returns a Dialer to be used with a PortForwarder
+func NewDialer(
+	kubeInterface kubernetes.Interface,
+	config *rest.Config,
+	namespace string,
+	pod string,
+) (httpstream.Dialer, error) {
+	req := kubeInterface.CoreV1().
+		RESTClient().
+		Post().
+		Resource("pods").
+		Namespace(namespace).
+		Name(pod).
+ SubResource("portforward") + + transport, upgrader, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) + return dialer, nil +} + +// StartAndWait begins the port-forwarding and waits until it's ready +func (fc *ForwardConnection) StartAndWait() error { + var err error + go func() { + ginkgo.GinkgoWriter.Println("Starting port-forward") + err = fc.Forwarder.ForwardPorts() + if err != nil { + ginkgo.GinkgoWriter.Printf("port-forward failed with error %s\n", err.Error()) + return + } + }() + if err != nil { + return fmt.Errorf("error starting port-forward: %w", err) + } + select { + case <-fc.readyChannel: + ginkgo.GinkgoWriter.Println("port-forward ready") + return nil + case <-fc.stopChannel: + ginkgo.GinkgoWriter.Println("port-forward closed") + return err + } +} + +// GetLocalPort will return the local port where the forward has started +func (fc *ForwardConnection) GetLocalPort() (string, error) { + ports, err := fc.Forwarder.GetPorts() + if err != nil { + return "", err + } + return strconv.Itoa(int(ports[0].Local)), nil +} + +// getPortMap takes the first port between the list of ports exposed by the given service, and +// returns a map with 0 as the local port for auto-assignment +func getPortMap(serviceObj *corev1.Service) ([]string, error) { + if len(serviceObj.Spec.Ports) == 0 { + return []string{}, fmt.Errorf("service %s has no ports", serviceObj.Name) + } + port := serviceObj.Spec.Ports[0].Port + return []string{fmt.Sprintf("0:%d", port)}, nil +} + +func getPodAndPortsFromService( + ctx context.Context, + kubeInterface kubernetes.Interface, + namespace, + service string, +) (string, []string, error) { + serviceObj, err := getServiceObject(ctx, kubeInterface, namespace, service) + if err != nil { + return "", nil, err + } + + podObj, err := getPodFromService(ctx, kubeInterface, serviceObj) + if err != nil { + return "", nil, err + } + + portMaps, err := getPortMap(serviceObj) + if err != nil { + return "", nil, err + } + + return podObj.Name, portMaps, nil +} + +func getServiceObject( + ctx context.Context, + kubeInterface kubernetes.Interface, + namespace, + service string, +) (*corev1.Service, error) { + return kubeInterface.CoreV1().Services(namespace).Get(ctx, service, metav1.GetOptions{}) +} + +func getPodFromService( + ctx context.Context, + kubeInterface kubernetes.Interface, + serviceObj *corev1.Service, +) (*corev1.Pod, error) { + namespace := serviceObj.Namespace + + labelSelector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: serviceObj.Spec.Selector, + }) + if err != nil { + return nil, err + } + + podList, err := kubeInterface.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labelSelector.String(), + }) + if err != nil { + return nil, err + } + + if len(podList.Items) == 0 { + return nil, fmt.Errorf("no pods found for service %s", serviceObj.Name) + } + + return &podList.Items[0], nil +} diff --git a/tests/utils/hibernate.go b/tests/utils/hibernate.go deleted file mode 100644 index 3faee5a5a6..0000000000 --- a/tests/utils/hibernate.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "context" - "fmt" - - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// HibernationMethod will be one of the supported ways to trigger an instance fencing -type HibernationMethod string - -const ( - // HibernateDeclaratively it is a keyword to use while fencing on/off the instances using annotation method - HibernateDeclaratively HibernationMethod = "annotation" - // HibernateImperatively it is a keyword to use while fencing on/off the instances using plugin method - HibernateImperatively HibernationMethod = "plugin" -) - -// HibernateOn hibernate on a cluster -func HibernateOn( - env *TestingEnvironment, - namespace, - clusterName string, - method HibernationMethod, -) error { - switch method { - case HibernateImperatively: - _, _, err := Run(fmt.Sprintf("kubectl cnpg hibernate on %v -n %v", - clusterName, namespace)) - if err != nil { - return err - } - return nil - case HibernateDeclaratively: - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return err - } - if cluster.Annotations == nil { - cluster.Annotations = make(map[string]string) - } - originCluster := cluster.DeepCopy() - cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOn - - err = env.Client.Patch(context.Background(), cluster, ctrlclient.MergeFrom(originCluster)) - return err - default: - return fmt.Errorf("unknown method: %v", method) - } -} - -// HibernateOff hibernate off a cluster -func HibernateOff( - env *TestingEnvironment, - namespace, - clusterName string, - method HibernationMethod, -) error { - switch method { - case HibernateImperatively: - _, _, err := Run(fmt.Sprintf("kubectl cnpg hibernate off %v -n %v", - clusterName, namespace)) - if err != nil { - return err - } - return nil - case HibernateDeclaratively: - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return err - } - if cluster.Annotations == nil { - cluster.Annotations = make(map[string]string) - } - originCluster := cluster.DeepCopy() - cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOff - - err = env.Client.Patch(context.Background(), cluster, ctrlclient.MergeFrom(originCluster)) - return err - default: - return fmt.Errorf("unknown method: %v", method) - } -} diff --git a/tests/utils/import_db.go b/tests/utils/importdb/import_db.go similarity index 79% rename from tests/utils/import_db.go rename to tests/utils/importdb/import_db.go index c3c7412f8d..87454d84e1 100644 --- a/tests/utils/import_db.go +++ b/tests/utils/importdb/import_db.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
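The declarative branch of the helper deleted above reduces to patching a single annotation with a merge patch; a condensed sketch of that pattern, assuming a ctx, a controller-runtime client c, and a previously fetched cluster object (the helper name is hypothetical):

	// hibernateOnDeclaratively toggles hibernation via the dedicated annotation,
	// mirroring what the removed HibernateOn helper did (sketch).
	func hibernateOnDeclaratively(ctx context.Context, c ctrlclient.Client, cluster *apiv1.Cluster) error {
		origin := cluster.DeepCopy()
		if cluster.Annotations == nil {
			cluster.Annotations = make(map[string]string)
		}
		cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOn
		// Merge-patch so only the annotation change is sent to the API server
		return c.Patch(ctx, cluster, ctrlclient.MergeFrom(origin))
	}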
@@ -12,19 +13,27 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +// Package importdb contains the functions to import a database +package importdb import ( + "context" "fmt" "os" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services" ) // ImportDatabaseMicroservice creates a cluster, starting from an external cluster @@ -32,18 +41,19 @@ import ( // NOTE: the application user on the source Cluster needs to be granted with // REPLICATION permissions, which are not set by default func ImportDatabaseMicroservice( + ctx context.Context, + crudClient client.Client, namespace, sourceClusterName, importedClusterName, imageName, databaseName string, - env *TestingEnvironment, ) (*apiv1.Cluster, error) { if imageName == "" { imageName = os.Getenv("POSTGRES_IMG") } storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - host, err := GetHostName(namespace, sourceClusterName, env) + host, err := services.GetHostName(ctx, crudClient, namespace, sourceClusterName) if err != nil { return nil, err } @@ -70,6 +80,8 @@ func ImportDatabaseMicroservice( Source: apiv1.ImportSource{ ExternalCluster: sourceClusterName, }, + PgDumpExtraOptions: []string{"--jobs=2"}, + PgRestoreExtraOptions: []string{"--jobs=2"}, PostImportApplicationSQL: []string{"SELECT 1"}, }, }, @@ -80,8 +92,8 @@ func ImportDatabaseMicroservice( Name: sourceClusterName, ConnectionParameters: map[string]string{ "host": host, - "user": AppUser, - "dbname": AppDBName, + "user": postgres.AppUser, + "dbname": postgres.AppDBName, }, Password: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ @@ -94,7 +106,7 @@ func ImportDatabaseMicroservice( }, } - obj, err := CreateObject(env, restoreCluster) + obj, err := objects.Create(ctx, crudClient, restoreCluster) if err != nil { return nil, err } @@ -110,19 +122,20 @@ func ImportDatabaseMicroservice( // Imports all the specified `databaseNames` and `roles` from the source cluster // NOTE: enableSuperuserAccess needs to be enabled func ImportDatabasesMonolith( + ctx context.Context, + crudClient client.Client, namespace, sourceClusterName, importedClusterName, imageName string, databaseNames []string, roles []string, - env *TestingEnvironment, ) (*apiv1.Cluster, error) { if imageName == "" { imageName = os.Getenv("POSTGRES_IMG") } storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - host, err := GetHostName(namespace, sourceClusterName, env) + host, err := services.GetHostName(ctx, crudClient, namespace, sourceClusterName) if err != nil { return nil, err } @@ -159,8 +172,8 @@ func ImportDatabasesMonolith( Name: sourceClusterName, ConnectionParameters: map[string]string{ "host": host, - "user": PostgresUser, - "dbname": PostgresDBName, + "user": postgres.PostgresUser, + "dbname": postgres.PostgresDBName, }, Password: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ @@ -173,7 +186,7 @@ func ImportDatabasesMonolith( }, } - obj, err := CreateObject(env, targetCluster) + 
obj, err := objects.Create(ctx, crudClient, targetCluster) if err != nil { return nil, err } diff --git a/tests/utils/job.go b/tests/utils/job.go deleted file mode 100644 index a9ae454301..0000000000 --- a/tests/utils/job.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "errors" - "fmt" - - batchv1 "k8s.io/api/batch/v1" -) - -// GetJob gets a Job by namespace and name -func (env TestingEnvironment) GetJob(namespace, jobName string) (*batchv1.Job, error) { - wrapErr := func(err error) error { - return fmt.Errorf("while getting job '%s/%s': %w", namespace, jobName, err) - } - jobList, err := env.GetJobList(namespace) - if err != nil { - return nil, wrapErr(err) - } - for _, job := range jobList.Items { - if jobName == job.Name { - return &job, nil - } - } - return nil, wrapErr(errors.New("job not found")) -} diff --git a/tests/utils/logs/doc.go b/tests/utils/logs/doc.go new file mode 100644 index 0000000000..9e3a7506ee --- /dev/null +++ b/tests/utils/logs/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package logs provides a way to parse and get the logs of a pod +package logs diff --git a/tests/utils/logs.go b/tests/utils/logs/logs.go similarity index 90% rename from tests/utils/logs.go rename to tests/utils/logs/logs.go index 04c7e2318c..f37142ccf9 100644 --- a/tests/utils/logs.go +++ b/tests/utils/logs/logs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,23 +13,34 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
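With the move to the importdb package, callers now pass the context and client explicitly instead of the whole TestingEnvironment; a sketch of the microservice-style call under the new signature (env, namespace, and the cluster names are illustrative assumptions):

	// Import a single database from a running source cluster (sketch).
	imported, err := importdb.ImportDatabaseMicroservice(
		env.Ctx, env.Client,
		namespace,
		"source-cluster",   // external cluster to import from
		"imported-cluster", // name of the cluster created by the import
		"",                 // empty imageName falls back to POSTGRES_IMG
		"app",              // database to import
	)
	Expect(err).ToNot(HaveOccurred())
	_ = imported // the newly created *apiv1.Cluster; wait for readiness before asserting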
+ +SPDX-License-Identifier: Apache-2.0 */ -package utils +package logs import ( + "context" "encoding/json" "fmt" "slices" "strings" "time" + + "k8s.io/client-go/kubernetes" + + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" ) // ParseJSONLogs returns the pod's logs of a given pod name, // in the form of a list of JSON entries -func ParseJSONLogs(namespace string, podName string, env *TestingEnvironment) ([]map[string]interface{}, error) { +func ParseJSONLogs( + ctx context.Context, + kubeInterface kubernetes.Interface, + namespace string, podName string, +) ([]map[string]interface{}, error) { // Gather pod logs - podLogs, err := env.GetPodLogs(namespace, podName) + podLogs, err := pods.Logs(ctx, kubeInterface, namespace, podName) if err != nil { return nil, err } diff --git a/tests/utils/logs_test.go b/tests/utils/logs/logs_test.go similarity index 94% rename from tests/utils/logs_test.go rename to tests/utils/logs/logs_test.go index d7fd064253..460b3b082d 100644 --- a/tests/utils/logs_test.go +++ b/tests/utils/logs/logs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +package logs import ( "encoding/json" diff --git a/tests/utils/suite_test.go b/tests/utils/logs/suite_test.go similarity index 77% rename from tests/utils/suite_test.go rename to tests/utils/logs/suite_test.go index e15d55b783..e7365f2638 100644 --- a/tests/utils/suite_test.go +++ b/tests/utils/logs/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +package logs import ( "testing" @@ -25,5 +28,5 @@ import ( func TestUtils(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Utils test suite") + RunSpecs(t, "Utils test logs suite") } diff --git a/tests/utils/minio.go b/tests/utils/minio/minio.go similarity index 74% rename from tests/utils/minio.go rename to tests/utils/minio/minio.go index d69821c6f3..ba249e7673 100644 --- a/tests/utils/minio.go +++ b/tests/utils/minio/minio.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,14 +13,19 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
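Since the relocated ParseJSONLogs returns each log line as a decoded JSON map, assertions on structured fields are straightforward; a small sketch (the "level" key is an assumption about the operator's JSON log format, and env.Interface is the suite's kubernetes.Interface):

	// Count error-level records in a pod's log (sketch).
	entries, err := logs.ParseJSONLogs(env.Ctx, env.Interface, namespace, podName)
	Expect(err).ToNot(HaveOccurred())
	errorCount := 0
	for _, entry := range entries {
		if entry["level"] == "error" {
			errorCount++
		}
	}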
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package utils
+// Package minio contains all the required functions to set up a MinIO deployment and
+// query this MinIO deployment using the MinIO API
+package minio
 
 import (
 	"encoding/json"
 	"fmt"
 	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
@@ -36,16 +42,22 @@ import (
 
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
 )
 
 const (
-	minioImage       = "minio/minio:RELEASE.2022-06-20T23-13-45Z"
-	minioClientImage = "minio/mc:RELEASE.2022-06-11T21-10-36Z"
+	// minioImage is the image used to run a MinIO server
+	minioImage = "minio/minio:RELEASE.2025-07-23T15-54-02Z"
+	// minioClientImage is the image used to run a MinIO client
+	minioClientImage = "minio/mc:RELEASE.2025-07-21T05-28-08Z"
 )
 
-// MinioEnv contains all the information related or required by MinIO deployment and
+// Env contains all the information related or required by MinIO deployment and
 // used by the functions on every test
-type MinioEnv struct {
+type Env struct {
 	Client      *corev1.Pod
 	CaPair      *certs.KeyPair
 	CaSecretObj corev1.Secret
@@ -56,9 +68,9 @@
 	Timeout     uint
 }
 
-// MinioSetup contains the resources needed for a working minio server deployment:
+// Setup contains the resources needed for a working minio server deployment:
 // a PersistentVolumeClaim, a Deployment and a Service
-type MinioSetup struct {
+type Setup struct {
 	PersistentVolumeClaim corev1.PersistentVolumeClaim
 	Deployment            appsv1.Deployment
 	Service               corev1.Service
@@ -69,10 +81,10 @@ type TagSet struct {
 	Tags map[string]string `json:"tagset"`
 }
 
-// InstallMinio installs minio in a given namespace
-func InstallMinio(
-	env *TestingEnvironment,
-	minioSetup MinioSetup,
+// installMinio installs minio in a given namespace
+func installMinio(
+	env *environment.TestingEnvironment,
+	minioSetup Setup,
 	timeoutSeconds uint,
 ) error {
 	if err := env.Client.Create(env.Ctx, &minioSetup.PersistentVolumeClaim); err != nil {
@@ -110,15 +122,15 @@
 	return err
 }
 
-// MinioDefaultSetup returns the definition for the default minio setup
-func MinioDefaultSetup(namespace string) (MinioSetup, error) {
-	pvc, err := MinioDefaultPVC(namespace)
+// defaultSetup returns the definition for the default minio setup
+func defaultSetup(namespace string) (Setup, error) {
+	pvc, err := defaultPVC(namespace)
 	if err != nil {
-		return MinioSetup{}, err
+		return Setup{}, err
 	}
-	deployment := MinioDefaultDeployment(namespace, pvc)
-	service := MinioDefaultSVC(namespace)
-	setup := MinioSetup{
+	deployment := defaultDeployment(namespace, pvc)
+	service := defaultSVC(namespace)
+	setup := Setup{
 		PersistentVolumeClaim: pvc,
 		Deployment:            deployment,
 		Service:               service,
@@ -126,8 +138,8 @@ func defaultSetup(namespace string) (Setup, error) {
 	return setup, nil
 }
 
-// MinioDefaultDeployment returns a default Deployment for minio
-func MinioDefaultDeployment(namespace string, minioPVC corev1.PersistentVolumeClaim) appsv1.Deployment {
+// defaultDeployment returns a default Deployment for minio
+func defaultDeployment(namespace string, minioPVC corev1.PersistentVolumeClaim) appsv1.Deployment {
 	seccompProfile := &corev1.SeccompProfile{
 		Type: corev1.SeccompProfileTypeRuntimeDefault,
 	}
@@ -222,8 +234,8 @@ func MinioDefaultDeployment(namespace string, minioPVC corev1.PersistentVolumeCl
 	return minioDeployment
 }
 
-// MinioDefaultSVC returns a default Service for minio
-func MinioDefaultSVC(namespace string) corev1.Service {
+// defaultSVC returns a default Service for minio
+func defaultSVC(namespace string) corev1.Service {
 	minioService := corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "minio-service",
@@ -245,8 +257,8 @@ func defaultSVC(namespace string) corev1.Service {
 	return minioService
 }
 
-// MinioDefaultPVC returns a default PVC for minio
-func MinioDefaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) {
+// defaultPVC returns a default PVC for minio
+func defaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) {
 	const claimName = "minio-pv-claim"
 	storageClass, ok := os.LookupEnv("E2E_DEFAULT_STORAGE_CLASS")
 	if !ok {
@@ -273,11 +285,11 @@ func defaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) {
 	return minioPVC, nil
 }
 
-// MinioSSLSetup returns the definition for a minio setup using SSL
-func MinioSSLSetup(namespace string) (MinioSetup, error) {
-	setup, err := MinioDefaultSetup(namespace)
+// sslSetup returns the definition for a minio setup using SSL
+func sslSetup(namespace string) (Setup, error) {
+	setup, err := defaultSetup(namespace)
 	if err != nil {
-		return MinioSetup{}, err
+		return Setup{}, err
 	}
 	const tlsVolumeName = "secret-volume"
 	const tlsVolumeMountPath = "/etc/secrets/certs"
@@ -341,8 +353,8 @@ func sslSetup(namespace string) (Setup, error) {
 	return setup, nil
 }
 
-// MinioDefaultClient returns the default Pod definition for a minio client
-func MinioDefaultClient(namespace string) corev1.Pod {
+// defaultClient returns the default Pod definition for a minio client
+func defaultClient(namespace string) corev1.Pod {
 	seccompProfile := &corev1.SeccompProfile{
 		Type: corev1.SeccompProfileTypeRuntimeDefault,
 	}
@@ -403,8 +415,8 @@ func defaultClient(namespace string) corev1.Pod {
 	return minioClient
 }
 
-// MinioSSLClient returns the Pod definition for a minio client using SSL
-func MinioSSLClient(namespace string) corev1.Pod {
+// sslClient returns the Pod definition for a minio client using SSL
+func sslClient(namespace string) corev1.Pod {
 	const (
 		configVolumeMountPath = "/mc/.mc"
 		configVolumeName      = "mc-config"
@@ -414,7 +426,7 @@
 	)
 	var secretMode int32 = 0o600
 
-	minioClient := MinioDefaultClient(namespace)
+	minioClient := defaultClient(namespace)
 
 	minioClient.Spec.Volumes = append(minioClient.Spec.Volumes,
 		corev1.Volume{
 			Name: configVolumeName,
@@ -448,8 +460,8 @@ func sslClient(namespace string) corev1.Pod {
 	return minioClient
 }
 
-// MinioDeploy will create a full MinIO deployment defined inthe minioEnv variable
-func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, error) {
+// Deploy will create a full MinIO deployment defined in the minioEnv variable
+func Deploy(minioEnv *Env, env *environment.TestingEnvironment) (*corev1.Pod, error) {
 	var err error
 	minioEnv.CaPair, err = certs.CreateRootCA(minioEnv.Namespace, "minio")
 	if err != nil {
@@ -457,7 +469,7 @@
 	}
 	minioEnv.CaSecretObj = *minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName)
-	if _, err = CreateObject(env, &minioEnv.CaSecretObj); err != nil {
+	if _, err = objects.Create(env.Ctx,
env.Client, &minioEnv.CaSecretObj); err != nil { return nil, err } @@ -474,20 +486,20 @@ func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, erro return nil, err } - setup, err := MinioSSLSetup(minioEnv.Namespace) + setup, err := sslSetup(minioEnv.Namespace) if err != nil { return nil, err } - if err = InstallMinio(env, setup, minioEnv.Timeout); err != nil { + if err = installMinio(env, setup, minioEnv.Timeout); err != nil { return nil, err } - minioClient := MinioSSLClient(minioEnv.Namespace) + minioClient := sslClient(minioEnv.Namespace) - return &minioClient, PodCreateAndWaitForReady(env, &minioClient, 240) + return &minioClient, pods.CreateAndWaitForReady(env.Ctx, env.Client, &minioClient, 240) } -func (m *MinioEnv) getCaSecret(env *TestingEnvironment, namespace string) (*corev1.Secret, error) { +func (m *Env) getCaSecret(env *environment.TestingEnvironment, namespace string) (*corev1.Secret, error) { var certSecret corev1.Secret if err := env.Client.Get(env.Ctx, types.NamespacedName{ @@ -507,25 +519,25 @@ func (m *MinioEnv) getCaSecret(env *TestingEnvironment, namespace string) (*core }, nil } -// CreateCaSecret creates the certificates required to authenticate against the the MinIO service -func (m *MinioEnv) CreateCaSecret(env *TestingEnvironment, namespace string) error { +// CreateCaSecret creates the certificates required to authenticate against the MinIO service +func (m *Env) CreateCaSecret(env *environment.TestingEnvironment, namespace string) error { caSecret, err := m.getCaSecret(env, namespace) if err != nil { return err } - _, err = CreateObject(env, caSecret) + _, err = objects.Create(env.Ctx, env.Client, caSecret) return err } -// CountFilesOnMinio uses the minioClient in the given `namespace` to count the +// CountFiles uses the minioClient in the given `namespace` to count the // amount of files matching the given `path` -func CountFilesOnMinio(minioEnv *MinioEnv, path string) (value int, err error) { +func CountFiles(minioEnv *Env, path string) (value int, err error) { var stdout string - stdout, _, err = RunUnchecked(fmt.Sprintf( + stdout, _, err = run.Unchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, - composeFindMinioCmd(path, "minio"))) + composeFindCmd(path, "minio"))) if err != nil { return -1, err } @@ -533,41 +545,41 @@ func CountFilesOnMinio(minioEnv *MinioEnv, path string) (value int, err error) { return value, err } -// ListFilesOnMinio uses the minioClient in the given `namespace` to list the +// ListFiles uses the minioClient in the given `namespace` to list the // paths matching the given `path` -func ListFilesOnMinio(minioEnv *MinioEnv, path string) (string, error) { +func ListFiles(minioEnv *Env, path string) (string, error) { var stdout string - stdout, _, err := RunUnchecked(fmt.Sprintf( + stdout, _, err := run.Unchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, - composeListFilesMinio(path, "minio"))) + composeListFiles(path, "minio"))) if err != nil { return "", err } return strings.Trim(stdout, "\n"), nil } -// composeListFilesMinio builds the Minio command to list the filenames matching a given path -func composeListFilesMinio(path string, serviceName string) string { +// composeListFiles builds the Minio command to list the filenames matching a given path +func composeListFiles(path string, serviceName string) string { return fmt.Sprintf("sh -c 'mc find %v --path %v'", serviceName, path) } -// composeListFilesMinio builds the Minio 
command to list the filenames matching a given path
-func composeCleanFilesMinio(path string) string {
+// composeCleanFiles builds the Minio command to remove the files matching a given path
+func composeCleanFiles(path string) string {
 	return fmt.Sprintf("sh -c 'mc rm --force --recursive %v'", path)
 }
 
-// composeFindMinioCmd builds the Minio find command
-func composeFindMinioCmd(path string, serviceName string) string {
+// composeFindCmd builds the Minio find command
+func composeFindCmd(path string, serviceName string) string {
 	return fmt.Sprintf("sh -c 'mc find %v --path %v | wc -l'", serviceName, path)
 }
 
-// GetFileTagsOnMinio will use the minioClient to retrieve the tags in a specified path
-func GetFileTagsOnMinio(minioEnv *MinioEnv, path string) (TagSet, error) {
+// GetFileTags will use the minioClient to retrieve the tags in a specified path
+func GetFileTags(minioEnv *Env, path string) (TagSet, error) {
 	var output TagSet
 	// Make sure we have a registered backup to access
-	out, _, err := RunUncheckedRetry(fmt.Sprintf(
+	out, _, err := run.UncheckedRetry(fmt.Sprintf(
 		"kubectl exec -n %v %v -- sh -c 'mc find minio --path %v | head -n1'",
 		minioEnv.Namespace,
 		minioEnv.Client.Name,
@@ -578,7 +590,7 @@
 
 	walFile := strings.Trim(out, "\n")
 
-	stdout, _, err := RunUncheckedRetry(fmt.Sprintf(
+	stdout, _, err := run.UncheckedRetry(fmt.Sprintf(
 		"kubectl exec -n %v %v -- sh -c 'mc --json tag list %v'",
 		minioEnv.Namespace,
 		minioEnv.Client.Name,
@@ -594,8 +606,8 @@
 	return output, nil
 }
 
-// MinioTestConnectivityUsingBarmanCloudWalArchive returns true if test connection is successful else false
-func MinioTestConnectivityUsingBarmanCloudWalArchive(
+// TestConnectivityUsingBarmanCloudWalArchive returns true if the test connection is successful, false otherwise
+func TestConnectivityUsingBarmanCloudWalArchive(
 	namespace,
 	clusterName,
 	podName,
@@ -609,7 +621,7 @@
 		"barman-cloud-wal-archive --cloud-provider aws-s3 --endpoint-url https://%s:9000 s3://cluster-backups/ %s "+
 			"000000010000000000000000 --test",
 		postgres.BarmanBackupEndpointCACertificateLocation, id, key, minioSvcName, clusterName)
-	_, _, err := RunUnchecked(fmt.Sprintf(
+	_, _, err := run.Unchecked(fmt.Sprintf(
 		"kubectl exec -n %v %v -c postgres -- /bin/bash -c \"%v\"",
 		namespace,
 		podName,
@@ -620,16 +632,25 @@
 	return true, nil
 }
 
-// CleanFilesOnMinio clean files on minio for a given path
-func CleanFilesOnMinio(minioEnv *MinioEnv, path string) (string, error) {
+// CleanFiles cleans files on minio for a given path
+func CleanFiles(minioEnv *Env, path string) (string, error) {
 	var stdout string
-	stdout, _, err := RunUnchecked(fmt.Sprintf(
+	stdout, _, err := run.Unchecked(fmt.Sprintf(
 		"kubectl exec -n %v %v -- %v",
 		minioEnv.Namespace,
 		minioEnv.Client.Name,
-		composeCleanFilesMinio(path)))
+		composeCleanFiles(path)))
 	if err != nil {
 		return "", err
 	}
 	return strings.Trim(stdout, "\n"), nil
}
+
+// GetFilePath gets the MinIO file string for WAL/backup objects in a configured bucket
+func GetFilePath(serverName, fileName string) string {
+	// the * regexes enable matching these typical paths:
+	// minio/backups/serverName/base/20220618T140300/data.tar
+	// minio/backups/serverName/wals/0000000100000000/000000010000000000000002.gz
+	// minio/backups/serverName/wals/00000002.history.gz
+	return
filepath.Join("*", serverName, "*", fileName) +} diff --git a/tests/utils/monitoring.go b/tests/utils/monitoring.go deleted file mode 100644 index 2a7c12b3b4..0000000000 --- a/tests/utils/monitoring.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "k8s.io/apimachinery/pkg/types" -) - -// GetPodMonitor gathers the current PodMonitor in a namespace -func (env TestingEnvironment) GetPodMonitor(namespace string, name string) (*monitoringv1.PodMonitor, error) { - podMonitor := &monitoringv1.PodMonitor{} - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: name, - } - - err := GetObject(&env, namespacedName, podMonitor) - if err != nil { - return nil, err - } - return podMonitor, nil -} diff --git a/tests/utils/namespace.go b/tests/utils/namespace.go deleted file mode 100644 index b7b0b73d6c..0000000000 --- a/tests/utils/namespace.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "bytes" - "errors" - "fmt" - "path" - "strings" - - "github.com/cloudnative-pg/machinery/pkg/fileutils" - "github.com/onsi/ginkgo/v2" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" -) - -// GetOperatorLogs collects the operator logs -func (env TestingEnvironment) GetOperatorLogs(buf *bytes.Buffer) error { - operatorPod, err := env.GetOperatorPod() - if err != nil { - return err - } - - streamPodLog := logs.StreamingRequest{ - Pod: &operatorPod, - Options: &corev1.PodLogOptions{ - Timestamps: false, - Follow: false, - }, - Client: env.Interface, - } - return streamPodLog.Stream(env.Ctx, buf) -} - -// CleanupNamespace does cleanup duty related to the tear-down of a namespace, -// and is intended to be called in a DeferCleanup clause -func (env TestingEnvironment) CleanupNamespace( - namespace string, - testName string, - testFailed bool, -) error { - if testFailed { - env.DumpNamespaceObjects(namespace, "out/"+testName+".log") - } - - if len(namespace) == 0 { - return fmt.Errorf("namespace is empty") - } - exists, _ := fileutils.FileExists(path.Join(env.SternLogDir, namespace)) - if exists && !testFailed { - err := fileutils.RemoveDirectory(path.Join(env.SternLogDir, namespace)) - if err != nil { - return err - } - } - - return env.DeleteNamespace(namespace) -} - -// CreateUniqueTestNamespace creates a namespace by using the passed prefix. -// Return the namespace name and any errors encountered. -// The namespace is automatically cleaned up at the end of the test. -func (env TestingEnvironment) CreateUniqueTestNamespace( - namespacePrefix string, - opts ...client.CreateOption, -) (string, error) { - name := env.createdNamespaces.generateUniqueName(namespacePrefix) - - return name, env.CreateTestNamespace(name, opts...) -} - -// CreateTestNamespace creates a namespace creates a namespace. -// Prefer CreateUniqueTestNamespace instead, unless you need a -// specific namespace name. If so, make sure there is no collision -// potential. -// The namespace is automatically cleaned up at the end of the test. -func (env TestingEnvironment) CreateTestNamespace( - name string, - opts ...client.CreateOption, -) error { - err := env.CreateNamespace(name, opts...) - if err != nil { - return err - } - - ginkgo.DeferCleanup(func() error { - return env.CleanupNamespace( - name, - ginkgo.CurrentSpecReport().LeafNodeText, - ginkgo.CurrentSpecReport().Failed(), - ) - }) - - return nil -} - -// CreateNamespace creates a namespace. -func (env TestingEnvironment) CreateNamespace(name string, opts ...client.CreateOption) error { - // Exit immediately if the name is empty - if name == "" { - return errors.New("cannot create namespace with empty name") - } - - u := &unstructured.Unstructured{} - u.SetName(name) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "Namespace", - }) - _, err := CreateObject(&env, u, opts...) 
- return err -} - -// EnsureNamespace checks for the presence of a namespace, and if it does not -// exist, creates it -func (env TestingEnvironment) EnsureNamespace(namespace string) error { - var nsList corev1.NamespaceList - err := GetObjectList(&env, &nsList) - if err != nil { - return err - } - for _, ns := range nsList.Items { - if ns.Name == namespace { - return nil - } - } - return env.CreateNamespace(namespace) -} - -// DeleteNamespace deletes a namespace if existent -func (env TestingEnvironment) DeleteNamespace(name string, opts ...client.DeleteOption) error { - // Exit immediately if the name is empty - if name == "" { - return errors.New("cannot delete namespace with empty name") - } - - // Exit immediately if the namespace is listed in PreserveNamespaces - for _, v := range env.PreserveNamespaces { - if strings.HasPrefix(name, v) { - return nil - } - } - - u := &unstructured.Unstructured{} - u.SetName(name) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "Namespace", - }) - - return DeleteObject(&env, u, opts...) -} - -// DeleteNamespaceAndWait deletes a namespace if existent and returns when deletion is completed -func (env TestingEnvironment) DeleteNamespaceAndWait(name string, timeoutSeconds int) error { - // Exit immediately if the namespace is listed in PreserveNamespaces - for _, v := range env.PreserveNamespaces { - if strings.HasPrefix(name, v) { - return nil - } - } - - _, _, err := Run(fmt.Sprintf("kubectl delete namespace %v --wait=true --timeout %vs", name, timeoutSeconds)) - - return err -} diff --git a/tests/utils/namespaces/namespace.go b/tests/utils/namespaces/namespace.go new file mode 100644 index 0000000000..8fabffcb2f --- /dev/null +++ b/tests/utils/namespaces/namespace.go @@ -0,0 +1,398 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package namespaces provides utilities to manage namespaces
+package namespaces
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/cloudnative-pg/machinery/pkg/fileutils"
+	"github.com/onsi/ginkgo/v2"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	discoveryv1 "k8s.io/api/discovery/v1"
+	v1 "k8s.io/api/events/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+)
+
+// SternLogDirectory contains the fixed path to store the cluster logs
+const SternLogDirectory = "cluster_logs/"
+
+func getPreserveNamespaces() []string {
+	var preserveNamespacesList []string
+	_, ok := os.LookupEnv("PRESERVE_NAMESPACES")
+	if ok {
+		preserveNamespacesList = strings.Fields(os.Getenv("PRESERVE_NAMESPACES"))
+	}
+
+	return preserveNamespacesList
+}
+
+// CleanupClusterLogs cleans up the cluster logs of a given namespace
+func CleanupClusterLogs(namespace string, testFailed bool) error {
+	exists, _ := fileutils.FileExists(path.Join(SternLogDirectory, namespace))
+	if exists && !testFailed {
+		if err := fileutils.RemoveDirectory(path.Join(SternLogDirectory, namespace)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// cleanupNamespace does cleanup duty related to the tear-down of a namespace,
+// and is intended to be called in a DeferCleanup clause
+func cleanupNamespace(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, testName string,
+	testFailed bool,
+) error {
+	if testFailed {
+		DumpNamespaceObjects(ctx, crudClient, namespace, "out/"+testName+".log")
+	}
+
+	if len(namespace) == 0 {
+		return fmt.Errorf("namespace is empty")
+	}
+
+	if err := CleanupClusterLogs(namespace, testFailed); err != nil {
+		return err
+	}
+
+	return deleteNamespace(ctx, crudClient, namespace)
+}
+
+// CreateTestNamespace creates a namespace.
+// Prefer CreateUniqueTestNamespace instead, unless you need a
+// specific namespace name. If so, make sure there is no collision
+// potential.
+// The namespace is automatically cleaned up at the end of the test.
+func CreateTestNamespace(
+	ctx context.Context,
+	crudClient client.Client,
+	name string,
+	opts ...client.CreateOption,
+) error {
+	err := CreateNamespace(ctx, crudClient, name, opts...)
+	if err != nil {
+		return err
+	}
+
+	ginkgo.DeferCleanup(func() error {
+		return cleanupNamespace(
+			ctx,
+			crudClient,
+			name,
+			ginkgo.CurrentSpecReport().LeafNodeText,
+			ginkgo.CurrentSpecReport().Failed(),
+		)
+	})
+
+	return nil
+}
+
+// CreateNamespace creates a namespace.
+func CreateNamespace( + ctx context.Context, + crudClient client.Client, + name string, + opts ...client.CreateOption, +) error { + // Exit immediately if the name is empty + if name == "" { + return errors.New("cannot create namespace with empty name") + } + + u := &unstructured.Unstructured{} + u.SetName(name) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Namespace", + }) + _, err := objects.Create(ctx, crudClient, u, opts...) + return err +} + +// EnsureNamespace checks for the presence of a namespace, and if it does not +// exist, creates it +func EnsureNamespace( + ctx context.Context, + crudClient client.Client, + namespace string, +) error { + var nsList corev1.NamespaceList + err := objects.List(ctx, crudClient, &nsList) + if err != nil { + return err + } + for _, ns := range nsList.Items { + if ns.Name == namespace { + return nil + } + } + return CreateNamespace(ctx, crudClient, namespace) +} + +// deleteNamespace deletes a namespace if existent +func deleteNamespace( + ctx context.Context, + crudClient client.Client, + name string, + opts ...client.DeleteOption, +) error { + // Exit immediately if the name is empty + if name == "" { + return errors.New("cannot delete namespace with empty name") + } + + // Exit immediately if the namespace is listed in PreserveNamespaces + for _, v := range getPreserveNamespaces() { + if strings.HasPrefix(name, v) { + return nil + } + } + + u := &unstructured.Unstructured{} + u.SetName(name) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Namespace", + }) + + return objects.Delete(ctx, crudClient, u, opts...) +} + +// DeleteNamespaceAndWait deletes a namespace if existent and returns when deletion is completed +func DeleteNamespaceAndWait( + ctx context.Context, + crudClient client.Client, + name string, + timeoutSeconds int, +) error { + // Exit immediately if the namespace is listed in PreserveNamespaces + for _, v := range getPreserveNamespaces() { + if strings.HasPrefix(name, v) { + return nil + } + } + + ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) + defer cancel() + + err := deleteNamespace(ctx, crudClient, name, client.PropagationPolicy("Background")) + if err != nil { + return err + } + + podList, err := pods.List(ctx, crudClient, name) + if err != nil { + return err + } + + for _, pod := range podList.Items { + err = pods.Delete( + ctx, crudClient, + name, pod.Name, + client.GracePeriodSeconds(1), client.PropagationPolicy("Background"), + ) + if err != nil && !apierrs.IsNotFound(err) { + return err + } + } + + return wait.PollUntilContextCancel(ctx, time.Second, true, + func(ctx context.Context) (bool, error) { + err := crudClient.Get(ctx, client.ObjectKey{Name: name}, &corev1.Namespace{}) + if apierrs.IsNotFound(err) { + return true, nil + } + return false, err + }, + ) +} + +// DumpNamespaceObjects logs the clusters, pods, pvcs etc. 
found in a namespace as JSON sections
+func DumpNamespaceObjects(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, filename string,
+) {
+	f, err := os.Create(filepath.Clean(filename))
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer func() {
+		_ = f.Sync()
+		_ = f.Close()
+	}()
+	w := bufio.NewWriter(f)
+	clusterList := &apiv1.ClusterList{}
+	_ = objects.List(ctx, crudClient, clusterList, client.InNamespace(namespace))
+
+	for _, cluster := range clusterList.Items {
+		out, _ := json.MarshalIndent(cluster, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v cluster\n", namespace, cluster.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	podList, _ := pods.List(ctx, crudClient, namespace)
+	for _, pod := range podList.Items {
+		out, _ := json.MarshalIndent(pod, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	pvcList, _ := storage.GetPVCList(ctx, crudClient, namespace)
+	for _, pvc := range pvcList.Items {
+		out, _ := json.MarshalIndent(pvc, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v PVC\n", namespace, pvc.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	jobList := &batchv1.JobList{}
+	_ = crudClient.List(
+		ctx, jobList, client.InNamespace(namespace),
+	)
+	for _, job := range jobList.Items {
+		out, _ := json.MarshalIndent(job, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v job\n", namespace, job.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	eventList, _ := GetEventList(ctx, crudClient, namespace)
+	out, _ := json.MarshalIndent(eventList.Items, "", " ")
+	_, _ = fmt.Fprintf(w, "Dumping events for namespace %v\n", namespace)
+	_, _ = fmt.Fprintln(w, string(out))
+
+	serviceAccountList, _ := GetServiceAccountList(ctx, crudClient, namespace)
+	for _, sa := range serviceAccountList.Items {
+		out, _ := json.MarshalIndent(sa, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v serviceaccount\n", namespace, sa.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	suffixes := []string{"-r", "-rw", "-any"}
+	for _, cluster := range clusterList.Items {
+		for _, suffix := range suffixes {
+			namespacedName := types.NamespacedName{
+				Namespace: namespace,
+				Name:      cluster.Name + suffix,
+			}
+			endpointSlice := &discoveryv1.EndpointSlice{}
+			_ = crudClient.Get(ctx, namespacedName, endpointSlice)
+			out, _ := json.MarshalIndent(endpointSlice, "", " ")
+			_, _ = fmt.Fprintf(w, "Dumping %v/%v endpointSlice\n", namespace, endpointSlice.Name)
+			_, _ = fmt.Fprintln(w, string(out))
+		}
+	}
+	// dump backup info
+	backupList, _ := backups.List(ctx, crudClient, namespace)
+	// dump backup object info if it's configured
+	for _, backup := range backupList.Items {
+		out, _ := json.MarshalIndent(backup, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v backup\n", namespace, backup.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+	// dump scheduledbackup info
+	scheduledBackupList, _ := GetScheduledBackupList(ctx, crudClient, namespace)
+	// dump scheduledbackup object info if it's configured
+	for _, scheduledBackup := range scheduledBackupList.Items {
+		out, _ := json.MarshalIndent(scheduledBackup, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v scheduledbackup\n", namespace, scheduledBackup.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	// dump volumesnapshot info
+	volumeSnapshotList, _ := storage.GetSnapshotList(ctx, crudClient, namespace)
+	for _, volumeSnapshot := range volumeSnapshotList.Items {
+		out, _ := json.MarshalIndent(volumeSnapshot, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v VolumeSnapshot\n", namespace, volumeSnapshot.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	err = w.Flush()
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+}
+
+// GetServiceAccountList gathers the current list of service accounts in a namespace
+func GetServiceAccountList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*corev1.ServiceAccountList, error) {
+	serviceAccountList := &corev1.ServiceAccountList{}
+	err := crudClient.List(
+		ctx, serviceAccountList, client.InNamespace(namespace),
+	)
+	return serviceAccountList, err
+}
+
+// GetEventList gathers the current list of events in a namespace
+func GetEventList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*v1.EventList, error) {
+	eventList := &v1.EventList{}
+	err := crudClient.List(
+		ctx, eventList, client.InNamespace(namespace),
+	)
+	return eventList, err
+}
+
+// GetScheduledBackupList gathers the current list of ScheduledBackups in a namespace
+func GetScheduledBackupList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*apiv1.ScheduledBackupList, error) {
+	scheduledBackupList := &apiv1.ScheduledBackupList{}
+	err := crudClient.List(
+		ctx, scheduledBackupList, client.InNamespace(namespace),
+	)
+	return scheduledBackupList, err
+}
diff --git a/tests/utils/nodes/drain.go b/tests/utils/nodes/drain.go
deleted file mode 100644
index dc2ede03c4..0000000000
--- a/tests/utils/nodes/drain.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package nodes contains the helper methods/functions for nodes
-package nodes
-
-import (
-	"fmt"
-
-	"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
-
-	. "github.com/onsi/ginkgo/v2" //nolint
-	. "github.com/onsi/gomega" //nolint
-)
-
-// DrainPrimaryNode drains the node containing the primary pod.
-// It returns the names of the pods that were running on that node -func DrainPrimaryNode( - namespace, - clusterName string, - timeoutSeconds int, - env *utils.TestingEnvironment, -) []string { - var primaryNode string - var podNames []string - By("identifying primary node and draining", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - primaryNode = pod.Spec.NodeName - - // Gather the pods running on this node - podList, err := env.GetClusterPodList(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - for _, pod := range podList.Items { - if pod.Spec.NodeName == primaryNode { - podNames = append(podNames, pod.Name) - } - } - - // Draining the primary pod's node - var stdout, stderr string - Eventually(func() error { - cmd := fmt.Sprintf("kubectl drain %v --ignore-daemonsets --delete-emptydir-data --force --timeout=%ds", - primaryNode, timeoutSeconds) - stdout, stderr, err = utils.RunUnchecked(cmd) - return err - }, timeoutSeconds).ShouldNot(HaveOccurred(), fmt.Sprintf("stdout: %s, stderr: %s", stdout, stderr)) - }) - By("ensuring no cluster pod is still running on the drained node", func() { - Eventually(func() ([]string, error) { - var usedNodes []string - podList, err := env.GetClusterPodList(namespace, clusterName) - for _, pod := range podList.Items { - usedNodes = append(usedNodes, pod.Spec.NodeName) - } - return usedNodes, err - }, 60).ShouldNot(ContainElement(primaryNode)) - }) - - return podNames -} - -// UncordonAllNodes executes the 'kubectl uncordon' command on each node of the list -func UncordonAllNodes(env *utils.TestingEnvironment) error { - nodeList, err := env.GetNodeList() - if err != nil { - return err - } - for _, node := range nodeList.Items { - command := fmt.Sprintf("kubectl uncordon %v", node.Name) - _, _, err = utils.Run(command) - if err != nil { - return err - } - } - return nil -} diff --git a/tests/utils/nodes/nodes.go b/tests/utils/nodes/nodes.go new file mode 100644 index 0000000000..c07a75186b --- /dev/null +++ b/tests/utils/nodes/nodes.go @@ -0,0 +1,163 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package nodes contains the helper methods/functions for nodes +package nodes + +import ( + "context" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + + . "github.com/onsi/ginkgo/v2" //nolint + . "github.com/onsi/gomega" //nolint +) + +// DrainPrimary drains the node containing the primary pod. 
+// It returns the names of the pods that were running on that node +func DrainPrimary( + ctx context.Context, + crudClient client.Client, + namespace, + clusterName string, + timeoutSeconds int, +) []string { + var primaryNode string + var podNames []string + By("identifying primary node and draining", func() { + pod, err := clusterutils.GetPrimary(ctx, crudClient, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + primaryNode = pod.Spec.NodeName + + // Gather the pods running on this node + podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + for _, pod := range podList.Items { + if pod.Spec.NodeName == primaryNode { + podNames = append(podNames, pod.Name) + } + } + + // Draining the primary pod's node + var stdout, stderr string + Eventually(func() error { + cmd := fmt.Sprintf("kubectl drain %v --ignore-daemonsets --delete-emptydir-data --force --timeout=%ds", + primaryNode, timeoutSeconds) + stdout, stderr, err = run.Unchecked(cmd) + return err + }, timeoutSeconds).ShouldNot(HaveOccurred(), fmt.Sprintf("stdout: %s, stderr: %s", stdout, stderr)) + }) + By("ensuring no cluster pod is still running on the drained node", func() { + Eventually(func() ([]string, error) { + var usedNodes []string + podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName) + for _, pod := range podList.Items { + usedNodes = append(usedNodes, pod.Spec.NodeName) + } + return usedNodes, err + }, 60).ShouldNot(ContainElement(primaryNode)) + }) + + return podNames +} + +// UncordonAll executes the 'kubectl uncordon' command on each node of the list +func UncordonAll( + ctx context.Context, + crudClient client.Client, +) error { + nodeList, err := List(ctx, crudClient) + if err != nil { + return err + } + for _, node := range nodeList.Items { + command := fmt.Sprintf("kubectl uncordon %v", node.Name) + _, _, err = run.Run(command) + if err != nil { + return err + } + } + return nil +} + +// List gathers the current list of Nodes +func List( + ctx context.Context, + crudClient client.Client, +) (*v1.NodeList, error) { + nodeList := &v1.NodeList{} + err := crudClient.List(ctx, nodeList, client.InNamespace("")) + return nodeList, err +} + +// DescribeKubernetesNodes prints the `describe node` for each node in the +// kubernetes cluster +func DescribeKubernetesNodes(ctx context.Context, crudClient client.Client) (string, error) { + nodeList, err := List(ctx, crudClient) + if err != nil { + return "", err + } + var report strings.Builder + for _, node := range nodeList.Items { + command := fmt.Sprintf("kubectl describe node %v", node.Name) + stdout, _, err := run.Run(command) + if err != nil { + return "", err + } + report.WriteString("================================================\n") + report.WriteString(stdout) + report.WriteString("================================================\n") + } + return report.String(), nil +} + +// IsNodeReachable checks if a node is: +// 1. Ready +// 2. 
Not tainted with the unreachable taint
+func IsNodeReachable(
+	ctx context.Context,
+	crudClient client.Client,
+	nodeName string,
+) (bool, error) {
+	node := &v1.Node{}
+	err := crudClient.Get(ctx, client.ObjectKey{Name: nodeName}, node)
+	if err != nil {
+		return false, err
+	}
+	for _, condition := range node.Status.Conditions {
+		if condition.Type == v1.NodeReady && condition.Status == v1.ConditionFalse {
+			return false, nil
+		}
+	}
+
+	// check that the node does not have the unreachable taint
+	for _, taint := range node.Spec.Taints {
+		if taint.Key == v1.TaintNodeUnreachable {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
diff --git a/tests/utils/objects/objects.go b/tests/utils/objects/objects.go
new file mode 100644
index 0000000000..3f08903fd5
--- /dev/null
+++ b/tests/utils/objects/objects.go
@@ -0,0 +1,120 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package objects provides functions to manage pure objects in Kubernetes
+package objects
+
+import (
+	"context"
+	"time"
+
+	"github.com/avast/retry-go/v4"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+	// RetryAttempts is the maximum number of attempts made by the retrying
+	// helpers in this package before giving up
+	RetryAttempts = 5
+
+	// PollingTime is the polling interval (in seconds) between retries
+	PollingTime = 5
+)
+
+// Create creates an object in the Kubernetes cluster
+func Create(
+	ctx context.Context,
+	crudClient client.Client,
+	object client.Object,
+	opts ...client.CreateOption,
+) (client.Object, error) {
+	err := retry.Do(
+		func() error {
+			return crudClient.Create(ctx, object, opts...)
+		},
+		retry.Delay(PollingTime*time.Second),
+		retry.Attempts(RetryAttempts),
+		retry.DelayType(retry.FixedDelay),
+		retry.RetryIf(func(err error) bool { return !errors.IsAlreadyExists(err) }),
+	)
+	return object, err
+}
+
+// Delete deletes an object in the Kubernetes cluster
+func Delete(
+	ctx context.Context,
+	crudClient client.Client,
+	object client.Object,
+	opts ...client.DeleteOption,
+) error {
+	err := retry.Do(
+		func() error {
+			return crudClient.Delete(ctx, object, opts...)
+		},
+		retry.Delay(PollingTime*time.Second),
+		retry.Attempts(RetryAttempts),
+		retry.DelayType(retry.FixedDelay),
+		retry.RetryIf(func(err error) bool { return !errors.IsNotFound(err) }),
+	)
+	return err
+}
+
+// List retrieves a list of objects
+func List(
+	ctx context.Context,
+	crudClient client.Client,
+	objectList client.ObjectList,
+	opts ...client.ListOption,
+) error {
+	err := retry.Do(
+		func() error {
+			err := crudClient.List(ctx, objectList, opts...)
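+			// Returning the error lets retry.Do retry the List with a fixed
+			// delay (PollingTime seconds), up to RetryAttempts times.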
+ if err != nil { + return err + } + return nil + }, + retry.Delay(PollingTime*time.Second), + retry.Attempts(RetryAttempts), + retry.DelayType(retry.FixedDelay), + ) + return err +} + +// Get retrieves an object for the given object key from the Kubernetes Cluster +func Get( + ctx context.Context, + crudClient client.Client, + objectKey client.ObjectKey, + object client.Object, +) error { + err := retry.Do( + func() error { + err := crudClient.Get(ctx, objectKey, object) + if err != nil { + return err + } + return nil + }, + retry.Delay(PollingTime*time.Second), + retry.Attempts(RetryAttempts), + retry.DelayType(retry.FixedDelay), + ) + return err +} diff --git a/tests/utils/openshift.go b/tests/utils/openshift/openshift.go similarity index 68% rename from tests/utils/openshift.go rename to tests/utils/openshift/openshift.go index 769ff8c413..be3d35a150 100644 --- a/tests/utils/openshift.go +++ b/tests/utils/openshift/openshift.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +// Package openshift provides functions to work with OLM CRDs +package openshift import ( + "context" "fmt" "strings" @@ -27,14 +32,20 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" ) -// GetSubscription returns an unstructured subscription object -func GetSubscription(env *TestingEnvironment) (*unstructured.Unstructured, error) { +// getSubscription returns an unstructured subscription object +func getSubscription( + ctx context.Context, + crudClient client.Client, +) (*unstructured.Unstructured, error) { subscription := &unstructured.Unstructured{} subscription.SetName("cloudnative-pg") subscription.SetNamespace("openshift-operators") @@ -43,13 +54,16 @@ func GetSubscription(env *TestingEnvironment) (*unstructured.Unstructured, error Version: "v1alpha1", Kind: "Subscription", }) - err := env.Client.Get(env.Ctx, ctrlclient.ObjectKeyFromObject(subscription), subscription) + err := crudClient.Get(ctx, client.ObjectKeyFromObject(subscription), subscription) return subscription, err } // GetSubscriptionVersion retrieves the current ClusterServiceVersion version of the operator -func GetSubscriptionVersion(env *TestingEnvironment) (string, error) { - subscription, err := GetSubscription(env) +func GetSubscriptionVersion( + ctx context.Context, + crudClient client.Client, +) (string, error) { + subscription, err := getSubscription(ctx, crudClient) if err != nil { return "", err } @@ -65,17 +79,21 @@ func GetSubscriptionVersion(env *TestingEnvironment) (string, error) { } // PatchStatusCondition removes status conditions on a given Cluster -func 
PatchStatusCondition(namespace, clusterName string, env *TestingEnvironment) error { +func PatchStatusCondition( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, +) error { cluster := &apiv1.Cluster{} var err error err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return err } clusterNoConditions := cluster.DeepCopy() clusterNoConditions.Status.Conditions = nil - return env.Client.Patch(env.Ctx, clusterNoConditions, ctrlclient.MergeFrom(cluster)) + return crudClient.Patch(ctx, clusterNoConditions, client.MergeFrom(cluster)) }) if err != nil { return err @@ -84,8 +102,8 @@ func PatchStatusCondition(namespace, clusterName string, env *TestingEnvironment } // GetOpenshiftVersion returns the current openshift version -func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) { - client, err := dynamic.NewForConfig(env.RestClientConfig) +func GetOpenshiftVersion(ctx context.Context, restConfig *rest.Config) (semver.Version, error) { + client, err := dynamic.NewForConfig(restConfig) if err != nil { return semver.Version{}, err } @@ -94,7 +112,7 @@ func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) { Group: "operator.openshift.io", Version: "v1", Resource: "openshiftcontrollermanagers", - }).Get(env.Ctx, "cluster", v1.GetOptions{}) + }).Get(ctx, "cluster", v1.GetOptions{}) if err != nil { return semver.Version{}, err } @@ -108,7 +126,11 @@ func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) { } // CreateSubscription creates a subscription object inside openshift with a fixed name -func CreateSubscription(env *TestingEnvironment, channel string) error { +func CreateSubscription( + ctx context.Context, + crudClient client.Client, + channel string, +) error { u := &unstructured.Unstructured{} u.SetName("cloudnative-pg") u.SetNamespace("openshift-operators") @@ -131,12 +153,15 @@ func CreateSubscription(env *TestingEnvironment, channel string) error { return err } - _, err = CreateObject(env, u) + _, err = objects.Create(ctx, crudClient, u) return err } // DeleteSubscription deletes the operator's subscription object -func DeleteSubscription(env *TestingEnvironment) error { +func DeleteSubscription( + ctx context.Context, + crudClient client.Client, +) error { u := &unstructured.Unstructured{} u.SetName("cloudnative-pg") u.SetNamespace("openshift-operators") @@ -146,7 +171,7 @@ func DeleteSubscription(env *TestingEnvironment) error { Kind: "Subscription", }) - err := DeleteObject(env, u) + err := objects.Delete(ctx, crudClient, u) if apierrors.IsNotFound(err) { return nil } @@ -155,7 +180,10 @@ func DeleteSubscription(env *TestingEnvironment) error { } // DeleteOperatorCRDs deletes the CRDs associated with the operator -func DeleteOperatorCRDs(env *TestingEnvironment) error { +func DeleteOperatorCRDs( + ctx context.Context, + crudClient client.Client, +) error { u := &unstructured.Unstructured{} u.SetName("clusters.postgresql.cnpg.io") u.SetGroupVersionKind(schema.GroupVersionKind{ @@ -163,22 +191,22 @@ func DeleteOperatorCRDs(env *TestingEnvironment) error { Version: "v1", Kind: "CustomResourceDefinition", }) - err := DeleteObject(env, u) + err := objects.Delete(ctx, crudClient, u) if !apierrors.IsNotFound(err) { return err } u.SetName("backups.postgresql.cnpg.io") - err = DeleteObject(env, u) + err = objects.Delete(ctx, crudClient, u) if 
!apierrors.IsNotFound(err) { return err } u.SetName("poolers.postgresql.cnpg.io") - err = DeleteObject(env, u) + err = objects.Delete(ctx, crudClient, u) if !apierrors.IsNotFound(err) { return err } u.SetName("scheduledbackups.postgresql.cnpg.io") - err = DeleteObject(env, u) + err = objects.Delete(ctx, crudClient, u) if apierrors.IsNotFound(err) { return nil } @@ -186,7 +214,10 @@ func DeleteOperatorCRDs(env *TestingEnvironment) error { } // DeleteCSV will delete all operator's CSVs -func DeleteCSV(env *TestingEnvironment) error { +func DeleteCSV( + ctx context.Context, + crudClient client.Client, +) error { ol := &unstructured.UnstructuredList{} ol.SetGroupVersionKind(schema.GroupVersionKind{ Group: "operators.coreos.com", @@ -196,12 +227,12 @@ func DeleteCSV(env *TestingEnvironment) error { labelSelector := labels.SelectorFromSet(map[string]string{ "operators.coreos.com/cloudnative-pg.openshift-operators": "", }) - err := GetObjectList(env, ol, ctrlclient.MatchingLabelsSelector{Selector: labelSelector}) + err := objects.List(ctx, crudClient, ol, client.MatchingLabelsSelector{Selector: labelSelector}) if err != nil { return err } for _, o := range ol.Items { - err = DeleteObject(env, &o) + err = objects.Delete(ctx, crudClient, &o) if err != nil { if apierrors.IsNotFound(err) { continue @@ -213,8 +244,12 @@ func DeleteCSV(env *TestingEnvironment) error { } // UpgradeSubscription patch an unstructured subscription object with target channel -func UpgradeSubscription(env *TestingEnvironment, channel string) error { - subscription, err := GetSubscription(env) +func UpgradeSubscription( + ctx context.Context, + crudClient client.Client, + channel string, +) error { + subscription, err := getSubscription(ctx, crudClient) if err != nil { return err } @@ -225,5 +260,5 @@ func UpgradeSubscription(env *TestingEnvironment, channel string) error { return err } - return env.Client.Patch(env.Ctx, newSubscription, ctrlclient.MergeFrom(subscription)) + return crudClient.Patch(ctx, newSubscription, client.MergeFrom(subscription)) } diff --git a/tests/utils/operator.go b/tests/utils/operator.go deleted file mode 100644 index 7ab479b09c..0000000000 --- a/tests/utils/operator.go +++ /dev/null @@ -1,356 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "bufio" - "encoding/json" - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - "time" - - "github.com/avast/retry-go/v4" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/ptr" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// ReloadOperatorDeployment finds and deletes the operator pod. 
Returns -// error if the new pod is not ready within a defined timeout -func ReloadOperatorDeployment(env *TestingEnvironment, timeoutSeconds uint) error { - operatorPod, err := env.GetOperatorPod() - if err != nil { - return err - } - zero := int64(0) - err = env.Client.Delete(env.Ctx, &operatorPod, - &ctrlclient.DeleteOptions{GracePeriodSeconds: &zero}, - ) - if err != nil { - return err - } - err = retry.Do( - func() error { - ready, err := env.IsOperatorReady() - if err != nil { - return err - } - if !ready { - return fmt.Errorf("operator pod %v is not ready", operatorPod.Name) - } - return nil - }, - retry.Delay(time.Second), - retry.Attempts(timeoutSeconds), - ) - return err -} - -// DumpOperator logs the JSON for the deployment in an operator namespace, its pods and endpoints -func (env TestingEnvironment) DumpOperator(namespace string, filename string) { - f, err := os.Create(filepath.Clean(filename)) - if err != nil { - fmt.Println(err) - return - } - w := bufio.NewWriter(f) - - deployment, _ := env.GetOperatorDeployment() - out, _ := json.MarshalIndent(deployment, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v deployment\n", namespace, deployment.Name) - _, _ = fmt.Fprintln(w, string(out)) - - podList, _ := env.GetPodList(namespace) - for _, pod := range podList.Items { - out, _ := json.MarshalIndent(pod, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - err = w.Flush() - if err != nil { - fmt.Println(err) - return - } - _ = f.Sync() - _ = f.Close() -} - -// GetOperatorDeployment returns the operator Deployment if there is a single one running, error otherwise -func (env TestingEnvironment) GetOperatorDeployment() (appsv1.Deployment, error) { - deploymentList := &appsv1.DeploymentList{} - if err := GetObjectList(&env, deploymentList, - ctrlclient.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}, - ); err != nil { - return appsv1.Deployment{}, err - } - // We check if we have one or more deployments - switch { - case len(deploymentList.Items) > 1: - err := fmt.Errorf("number of operator deployments != 1") - return appsv1.Deployment{}, err - case len(deploymentList.Items) == 1: - return deploymentList.Items[0], nil - } - - if err := GetObjectList( - &env, - deploymentList, - ctrlclient.HasLabels{"operators.coreos.com/cloudnative-pg.openshift-operators"}, - ); err != nil { - return appsv1.Deployment{}, err - } - - // We check if we have one or more deployments - switch { - case len(deploymentList.Items) > 1: - err := fmt.Errorf("number of operator deployments != 1") - return appsv1.Deployment{}, err - case len(deploymentList.Items) == 1: - return deploymentList.Items[0], nil - } - - return deploymentList.Items[0], nil -} - -// GetOperatorPod returns the operator pod if there is a single one running, error otherwise -func (env TestingEnvironment) GetOperatorPod() (corev1.Pod, error) { - podList := &corev1.PodList{} - - // This will work for newer version of the operator, which are using - // our custom label - if err := GetObjectList( - &env, podList, ctrlclient.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}); err != nil { - return corev1.Pod{}, err - } - activePods := utils.FilterActivePods(podList.Items) - switch { - case len(activePods) > 1: - err := fmt.Errorf("number of running operator pods greater than 1: %v pods running", len(activePods)) - return corev1.Pod{}, err - - case len(activePods) == 1: - return activePods[0], nil - } - - operatorNamespace, err := 
env.GetOperatorNamespaceName() - if err != nil { - return corev1.Pod{}, err - } - - // This will work for older version of the operator, which are using - // the default label from kube-builder - if err := GetObjectList( - &env, podList, - ctrlclient.MatchingLabels{"control-plane": "controller-manager"}, - ctrlclient.InNamespace(operatorNamespace)); err != nil { - return corev1.Pod{}, err - } - activePods = utils.FilterActivePods(podList.Items) - if len(activePods) != 1 { - err := fmt.Errorf("number of running operator different than 1: %v pods running", len(activePods)) - return corev1.Pod{}, err - } - - return podList.Items[0], nil -} - -// GetOperatorNamespaceName returns the namespace the operator Deployment is running in -func (env TestingEnvironment) GetOperatorNamespaceName() (string, error) { - deployment, err := env.GetOperatorDeployment() - if err != nil { - return "", err - } - return deployment.GetNamespace(), err -} - -// IsOperatorReady ensures that the operator will be ready. -func (env TestingEnvironment) IsOperatorReady() (bool, error) { - pod, err := env.GetOperatorPod() - if err != nil { - return false, err - } - - isPodReady := utils.IsPodReady(pod) - if !isPodReady { - return false, err - } - - namespace := pod.Namespace - - // Detect if we are running under OLM - var webhookManagedByOLM bool - for _, envVar := range pod.Spec.Containers[0].Env { - if envVar.Name == "WEBHOOK_CERT_DIR" { - webhookManagedByOLM = true - } - } - - // If the operator is managing certificates for webhooks, check that the setup is completed - if !webhookManagedByOLM { - err = CheckWebhookReady(&env, namespace) - if err != nil { - return false, err - } - } - - // Dry run object creation to check that webhook Service is correctly running - testCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "readiness-check-" + rand.String(5), - Namespace: "default", - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - }, - }, - } - _, err = CreateObject(&env, testCluster, &ctrlclient.CreateOptions{DryRun: []string{metav1.DryRunAll}}) - if err != nil { - return false, err - } - - return true, err -} - -// IsOperatorDeploymentReady returns true if the operator deployment has the expected number -// of ready pods. 
-// It returns an error if there was a problem getting the operator deployment -func (env *TestingEnvironment) IsOperatorDeploymentReady() (bool, error) { - operatorDeployment, err := env.GetOperatorDeployment() - if err != nil { - return false, err - } - - if operatorDeployment.Spec.Replicas != nil && - operatorDeployment.Status.ReadyReplicas != *operatorDeployment.Spec.Replicas { - return false, fmt.Errorf("deployment not ready %v of %v ready", - operatorDeployment.Status.ReadyReplicas, operatorDeployment.Status.ReadyReplicas) - } - - return true, nil -} - -// ScaleOperatorDeployment will scale the operator to n replicas and return error in case of failure -func (env *TestingEnvironment) ScaleOperatorDeployment(replicas int32) error { - operatorDeployment, err := env.GetOperatorDeployment() - if err != nil { - return err - } - - updatedOperatorDeployment := *operatorDeployment.DeepCopy() - updatedOperatorDeployment.Spec.Replicas = ptr.To(replicas) - - // Scale down operator deployment to zero replicas - err = env.Client.Patch(env.Ctx, &updatedOperatorDeployment, ctrlclient.MergeFrom(&operatorDeployment)) - if err != nil { - return err - } - - return retry.Do( - func() error { - _, err := env.IsOperatorDeploymentReady() - return err - }, - retry.Delay(time.Second), - retry.Attempts(120), - ) -} - -// OperatorPodRenamed checks if the operator pod was renamed -func OperatorPodRenamed(operatorPod corev1.Pod, expectedOperatorPodName string) bool { - return operatorPod.GetName() != expectedOperatorPodName -} - -// OperatorPodRestarted checks if the operator pod was restarted -func OperatorPodRestarted(operatorPod corev1.Pod) bool { - restartCount := 0 - for _, containerStatus := range operatorPod.Status.ContainerStatuses { - if containerStatus.Name == "manager" { - restartCount = int(containerStatus.RestartCount) - } - } - return restartCount != 0 -} - -// GetOperatorPodName returns the name of the current operator pod -// NOTE: will return an error if the pod is being deleted -func GetOperatorPodName(env *TestingEnvironment) (string, error) { - pod, err := env.GetOperatorPod() - if err != nil { - return "", err - } - - if pod.GetDeletionTimestamp() != nil { - return "", fmt.Errorf("pod is being deleted") - } - return pod.GetName(), nil -} - -// HasOperatorBeenUpgraded determines if the operator has been upgraded by checking -// if there is a deletion timestamp. 
If there isn't, it returns true -func HasOperatorBeenUpgraded(env *TestingEnvironment) bool { - _, err := GetOperatorPodName(env) - return err == nil -} - -// GetOperatorVersion returns the current operator version -func GetOperatorVersion(namespace, podName string) (string, error) { - out, _, err := RunUnchecked(fmt.Sprintf( - "kubectl -n %v exec %v -c manager -- /manager version", - namespace, - podName, - )) - if err != nil { - return "", err - } - versionRegexp := regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`) - ver := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))[1] - return ver, nil -} - -// GetOperatorArchitectures returns all the supported operator architectures -func GetOperatorArchitectures(operatorPod *corev1.Pod) ([]string, error) { - out, _, err := RunUnchecked(fmt.Sprintf( - "kubectl -n %v exec %v -c manager -- /manager debug show-architectures", - operatorPod.Namespace, - operatorPod.Name, - )) - if err != nil { - return nil, err - } - - // `debug show-architectures` will print a JSON object - var res []string - err = json.Unmarshal([]byte(out), &res) - if err != nil { - return nil, err - } - - return res, err -} diff --git a/tests/utils/operator/doc.go b/tests/utils/operator/doc.go new file mode 100644 index 0000000000..7680695d8c --- /dev/null +++ b/tests/utils/operator/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package operator provides functions to handle and manage the operator +package operator diff --git a/tests/utils/operator/operator.go b/tests/utils/operator/operator.go new file mode 100644 index 0000000000..fe333045dd --- /dev/null +++ b/tests/utils/operator/operator.go @@ -0,0 +1,347 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package operator
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/avast/retry-go/v4"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/utils/ptr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+)
+
+// ReloadDeployment finds and deletes the operator pod. It returns an
+// error if the new pod is not ready within the given timeout
+func ReloadDeployment(
+	ctx context.Context,
+	crudClient client.Client,
+	timeoutSeconds uint,
+) error {
+	operatorPod, err := GetPod(ctx, crudClient)
+	if err != nil {
+		return err
+	}
+
+	err = crudClient.Delete(ctx, &operatorPod,
+		&client.DeleteOptions{GracePeriodSeconds: ptr.To(int64(1))},
+	)
+	if err != nil {
+		return err
+	}
+	// Wait for the operator pod to be ready
+	return WaitForReady(ctx, crudClient, timeoutSeconds, true)
+}
+
+// Dump logs the JSON for the deployment in an operator namespace, its pods and endpoints
+func Dump(ctx context.Context, crudClient client.Client, namespace, filename string) {
+	f, err := os.Create(filepath.Clean(filename))
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	w := bufio.NewWriter(f)
+
+	deployment, _ := GetDeployment(ctx, crudClient)
+	out, _ := json.MarshalIndent(deployment, "", " ")
+	_, _ = fmt.Fprintf(w, "Dumping %v/%v deployment\n", namespace, deployment.Name)
+	_, _ = fmt.Fprintln(w, string(out))
+
+	podList, _ := pods.List(ctx, crudClient, namespace)
+	for _, pod := range podList.Items {
+		out, _ := json.MarshalIndent(pod, "", " ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	err = w.Flush()
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	_ = f.Sync()
+	_ = f.Close()
+}
+
+// GetDeployment returns the operator Deployment if there is a single one running, error otherwise
+func GetDeployment(ctx context.Context, crudClient client.Client) (appsv1.Deployment, error) {
+	deploymentList := &appsv1.DeploymentList{}
+	if err := objects.List(ctx, crudClient, deploymentList,
+		client.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"},
+	); err != nil {
+		return appsv1.Deployment{}, err
+	}
+	// We check if we have one or more deployments
+	switch {
+	case len(deploymentList.Items) > 1:
+		err := fmt.Errorf("number of operator deployments != 1")
+		return appsv1.Deployment{}, err
+	case len(deploymentList.Items) == 1:
+		return deploymentList.Items[0], nil
+	}
+
+	if err := objects.List(
+		ctx,
+		crudClient,
+		deploymentList,
+		client.HasLabels{"operators.coreos.com/cloudnative-pg.openshift-operators"},
+	); err != nil {
+		return appsv1.Deployment{}, err
+	}
+
+	// We check if we have one or more deployments
+	switch {
+	case len(deploymentList.Items) > 1:
+		err := fmt.Errorf("number of operator deployments != 1")
+		return appsv1.Deployment{}, err
+	case len(deploymentList.Items) == 1:
+		return deploymentList.Items[0], nil
+	}
+
+	return appsv1.Deployment{}, fmt.Errorf("no operator deployment found")
+}
+
+// GetPod returns the operator pod if there is a single one running, error otherwise
+func GetPod(ctx context.Context, crudClient client.Client) (corev1.Pod, error) {
+	podList := &corev1.PodList{}
+
+	// This will work for newer versions of the operator, which are using
+	// our custom label
+	if err := objects.List(
+		ctx, crudClient,
+		podList, client.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}); err != nil {
+		return corev1.Pod{}, err
+	}
+	activePods := utils.FilterActivePods(podList.Items)
+	if len(activePods) != 1 {
+		err := fmt.Errorf("number of running operator pods different from 1: %v pods running", len(activePods))
+		return corev1.Pod{}, err
+	}
+
+	return podList.Items[0], nil
+}
+
+// NamespaceName returns the namespace the operator Deployment is running in
+func NamespaceName(ctx context.Context, crudClient client.Client) (string, error) {
+	deployment, err := GetDeployment(ctx, crudClient)
+	if err != nil {
+		return "", err
+	}
+	return deployment.GetNamespace(), nil
+}
+
+// IsReady checks that the operator deployment is ready, optionally
+// verifying that its admission webhook is working too.
+func IsReady(
+	ctx context.Context,
+	crudClient client.Client,
+	checkWebhook bool,
+) (bool, error) {
+	if ready, err := isDeploymentReady(ctx, crudClient); err != nil || !ready {
+		return ready, err
+	}
+
+	// If the operator is not managing webhooks, we don't need to check. Exit early
+	if !checkWebhook {
+		return true, nil
+	}
+
+	deploy, err := GetDeployment(ctx, crudClient)
+	if err != nil {
+		return false, err
+	}
+	namespace := deploy.GetNamespace()
+
+	// Detect if we are running under OLM
+	var webhookManagedByOLM bool
+	for _, envVar := range deploy.Spec.Template.Spec.Containers[0].Env {
+		if envVar.Name == "WEBHOOK_CERT_DIR" {
+			webhookManagedByOLM = true
+		}
+	}
+
+	// If the operator is managing certificates for webhooks, check that the setup is completed
+	if !webhookManagedByOLM {
+		err = checkWebhookSetup(ctx, crudClient, namespace)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	return isWebhookWorking(ctx, crudClient)
+}
+
+// WaitForReady waits for the operator deployment to be ready.
+// If checkWebhook is true, it will also check that the webhook is replying
+func WaitForReady(
+	ctx context.Context,
+	crudClient client.Client,
+	timeoutSeconds uint,
+	checkWebhook bool,
+) error {
+	return retry.Do(
+		func() error {
+			ready, err := IsReady(ctx, crudClient, checkWebhook)
+			if err != nil {
+				return fmt.Errorf("operator readiness check failed: %w", err)
+			}
+			if !ready {
+				return fmt.Errorf("operator deployment is not ready")
+			}
+			return nil
+		},
+		retry.Delay(time.Second),
+		retry.Attempts(timeoutSeconds),
+	)
+}
+
+// isDeploymentReady returns true if the operator deployment has the expected number
+// of ready pods.
+// It returns an error if there was a problem getting the operator deployment +func isDeploymentReady(ctx context.Context, crudClient client.Client) (bool, error) { + operatorDeployment, err := GetDeployment(ctx, crudClient) + if err != nil { + return false, err + } + + return deployments.IsReady(operatorDeployment), nil +} + +// ScaleOperatorDeployment will scale the operator to n replicas and return an error in case of failure +func ScaleOperatorDeployment( + ctx context.Context, crudClient client.Client, replicas int32, +) error { + operatorDeployment, err := GetDeployment(ctx, crudClient) + if err != nil { + return err + } + + updatedOperatorDeployment := *operatorDeployment.DeepCopy() + updatedOperatorDeployment.Spec.Replicas = ptr.To(replicas) + + err = crudClient.Patch(ctx, &updatedOperatorDeployment, client.MergeFrom(&operatorDeployment)) + if err != nil { + return err + } + + // Wait for the operator deployment to be ready + return WaitForReady(ctx, crudClient, 120, replicas > 0) +} + +// PodRenamed checks if the operator pod was renamed +func PodRenamed(operatorPod corev1.Pod, expectedOperatorPodName string) bool { + return operatorPod.GetName() != expectedOperatorPodName +} + +// PodRestarted checks if the operator pod was restarted +func PodRestarted(operatorPod corev1.Pod) bool { + restartCount := 0 + for _, containerStatus := range operatorPod.Status.ContainerStatuses { + if containerStatus.Name == "manager" { + restartCount = int(containerStatus.RestartCount) + } + } + return restartCount != 0 +} + +// GetPodName returns the name of the current operator pod +// NOTE: will return an error if the pod is being deleted +func GetPodName(ctx context.Context, crudClient client.Client) (string, error) { + pod, err := GetPod(ctx, crudClient) + if err != nil { + return "", err + } + + if pod.GetDeletionTimestamp() != nil { + return "", fmt.Errorf("pod is being deleted") + } + return pod.GetName(), nil +} + +// HasBeenUpgraded determines if the operator has been upgraded by checking +// if there is a deletion timestamp. 
If there isn't, it returns true +func HasBeenUpgraded(ctx context.Context, crudClient client.Client) bool { + _, err := GetPodName(ctx, crudClient) + return err == nil +} + +// Version returns the current operator version +func Version(namespace, podName string) (string, error) { + out, _, err := run.Unchecked(fmt.Sprintf( + "kubectl -n %v exec %v -c manager -- /manager version", + namespace, + podName, + )) + if err != nil { + return "", err + } + versionRegexp := regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`) + ver := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))[1] + return ver, nil +} + +// Architectures returns all the supported operator architectures +func Architectures(operatorPod *corev1.Pod) ([]string, error) { + out, _, err := run.Unchecked(fmt.Sprintf( + "kubectl -n %v exec %v -c manager -- /manager debug show-architectures", + operatorPod.Namespace, + operatorPod.Name, + )) + if err != nil { + return nil, err + } + + // `debug show-architectures` will print a JSON object + var res []string + err = json.Unmarshal([]byte(out), &res) + if err != nil { + return nil, err + } + + return res, err +} + +// GetLeaderInfoFromLease gathers leader holderIdentity from the lease +func GetLeaderInfoFromLease( + ctx context.Context, + kubeInterface kubernetes.Interface, + operatorNamespace string, +) (string, error) { + leaseInterface := kubeInterface.CoordinationV1().Leases(operatorNamespace) + lease, err := leaseInterface.Get(ctx, controller.LeaderElectionID, metav1.GetOptions{}) + if err != nil { + return "", err + } + return *lease.Spec.HolderIdentity, nil +} diff --git a/tests/utils/release.go b/tests/utils/operator/release.go similarity index 78% rename from tests/utils/release.go rename to tests/utils/operator/release.go index e92dc961a4..10c9a4cb86 100644 --- a/tests/utils/release.go +++ b/tests/utils/operator/release.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,16 +13,20 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-// Package utils contains helper functions/methods for e2e
-package utils
+package operator
 
 import (
 	"errors"
+	"fmt"
 	"io/fs"
 	"os"
 	"os/exec"
+	"regexp"
+	"slices"
 	"sort"
 	"strings"
 
@@ -73,14 +78,19 @@ func GetAvailableReleases(releasesPath string) ([]*semver.Version, error) {
 	// build the array that contains the versions
 	// found in the releasePath directory
 	for i, file := range validFiles {
-		tag := extractTag(file.Name())
+		tag, err := extractTag(file.Name())
+		if err != nil {
+			continue
+		}
 		versions[i] = semver.MustParse(tag)
 	}
 
 	// Sorting version as descending order ([v1.10.0, v1.9.0...])
 	sort.Sort(sort.Reverse(semver.Collection(versions)))
 
-	return versions, nil
+	return slices.CompactFunc(versions, func(a, b *semver.Version) bool {
+		return a.Equal(b)
+	}), nil
 }
 
 func isReleasePullRequestBranch() bool {
@@ -95,9 +105,13 @@ func isReleasePullRequestBranch() bool {
 	return strings.HasPrefix(branchName, "release/v")
 }
 
-func extractTag(releaseFile string) string {
-	releaseFile = strings.TrimPrefix(releaseFile, "cnpg-")
-	tag := strings.TrimSuffix(releaseFile, ".yaml")
+var extractTagRegex = regexp.MustCompile(`-(\d+\.\d+\.\d+)\.yaml$`)
 
-	return tag
+func extractTag(releaseFile string) (string, error) {
+	matches := extractTagRegex.FindStringSubmatch(releaseFile)
+	if len(matches) == 0 {
+		return "", fmt.Errorf("could not extract tag from filename %s", releaseFile)
+	}
+	// since the regex matched, the element at index 1 contains the capture group
+	return matches[1], nil
 }
diff --git a/tests/utils/release_test.go b/tests/utils/operator/release_test.go
similarity index 58%
rename from tests/utils/release_test.go
rename to tests/utils/operator/release_test.go
index 7126d586e8..090a91c2eb 100644
--- a/tests/utils/release_test.go
+++ b/tests/utils/operator/release_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,11 +13,14 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package utils
+package operator
 
 import (
+	"os"
 	"path/filepath"
 	"strings"
 
@@ -26,16 +30,24 @@ import (
 	. "github.com/onsi/gomega"
 )
 
+const releaseDirectoryPath = "../../../releases"
+
 var _ = Describe("Release tag extraction", func() {
 	It("properly works with expected filename", func() {
-		tag := extractTag("cnpg-0.5.0.yaml")
+		tag, err := extractTag("cnpg-0.5.0.yaml")
+		Expect(err).ToNot(HaveOccurred())
+		Expect(tag).To(Equal("0.5.0"))
+	})
+	It("properly works with a different prefix", func() {
+		tag, err := extractTag("modified-manifest-0.5.0.yaml")
+		Expect(err).ToNot(HaveOccurred())
 		Expect(tag).To(Equal("0.5.0"))
 	})
 })
 
 var _ = Describe("Most recent tag", func() {
 	It("properly works with release branch", func() {
-		releasesDir, err := filepath.Abs("../../releases")
+		releasesDir, err := filepath.Abs(releaseDirectoryPath)
 		Expect(err).ToNot(HaveOccurred())
 
 		versionList, err := GetAvailableReleases(releasesDir)
@@ -53,7 +65,7 @@ var _ = Describe("Most recent tag", func() {
 	})
 
 	It("properly works with dev branch", func() {
-		releasesDir, err := filepath.Abs("../../releases")
+		releasesDir, err := filepath.Abs(releaseDirectoryPath)
 		Expect(err).ToNot(HaveOccurred())
 
 		GinkgoT().Setenv("BRANCH_NAME", "dev/"+versions.Version)
@@ -85,4 +97,44 @@ var _ = Describe("GetAvailableReleases fails on wrong release directory", func()
 		_, err := GetMostRecentReleaseTag(tmpDir)
 		Expect(err).To(HaveOccurred())
 	})
+
+	It("properly deduplicates releases", func() {
+		tmpDir := GinkgoT().TempDir()
+
+		for _, file := range []string{
+			"cnpg-0.5.0.yaml",
+			"cnpg-0.5.1.yaml",
+			"cnpg-0.6.0.yaml",
+			"mangled-cnpg-0.5.1.yaml",
+		} {
+			f, err := os.Create(filepath.Clean(filepath.Join(tmpDir, file)))
+			Expect(err).ToNot(HaveOccurred())
+			Expect(f.Close()).ToNot(HaveOccurred())
+		}
+
+		versions, err := GetAvailableReleases(tmpDir)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(versions).To(HaveLen(3))
+		Expect(versions[0].String()).To(Equal("0.6.0"))
+		Expect(versions[1].String()).To(Equal("0.5.1"))
+		Expect(versions[2].String()).To(Equal("0.5.0"))
+	})
+
+	It("properly ignores rc versions", func() {
+		tmpDir := GinkgoT().TempDir()
+
+		for _, file := range []string{
+			"cnpg-0.5.0.yaml",
+			"cnpg-0.5.1.yaml",
+			"cnpg-0.6.0-rc1.yaml",
+		} {
+			f, err := os.Create(filepath.Clean(filepath.Join(tmpDir, file)))
+			Expect(err).ToNot(HaveOccurred())
+			Expect(f.Close()).ToNot(HaveOccurred())
+		}
+
+		latest, err := GetMostRecentReleaseTag(tmpDir)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(latest).To(Equal("0.5.1"))
+	})
 })
diff --git a/internal/cmd/manager/walarchive/suite_test.go b/tests/utils/operator/suite_test.go
similarity index 77%
rename from internal/cmd/manager/walarchive/suite_test.go
rename to tests/utils/operator/suite_test.go
index e8e0072475..b2d40da5d8 100644
--- a/internal/cmd/manager/walarchive/suite_test.go
+++ b/tests/utils/operator/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package walarchive
+package operator
 
 import (
 	"testing"
@@ -25,5 +28,5 @@ import (
 
 func TestUtils(t *testing.T) {
 	RegisterFailHandler(Fail)
-	RunSpecs(t, "walarchive test suite")
+	RunSpecs(t, "Operator utils test suite")
 }
diff --git a/tests/utils/upgrade.go b/tests/utils/operator/upgrade.go
similarity index 64%
rename from tests/utils/upgrade.go
rename to tests/utils/operator/upgrade.go
index c8fa832ffd..798a7dc315 100644
--- a/tests/utils/upgrade.go
+++ b/tests/utils/operator/upgrade.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,11 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package utils
+// Package operator provides functions to handle the operator install/uninstall process
+package operator
 
 import (
+	"context"
 	"fmt"
 
 	corev1 "k8s.io/api/core/v1"
@@ -24,25 +29,34 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
 
 	. "github.com/onsi/ginkgo/v2" // nolint
 	. "github.com/onsi/gomega" // nolint
 )
 
-// CreateOperatorConfigurationMap creates the operator namespace and enables/disable the online upgrade for
+// CreateConfigMap creates the operator namespace and enables/disables the online upgrade for
 // the instance manager
-func CreateOperatorConfigurationMap(pgOperatorNamespace, configName string, isOnline bool, env *TestingEnvironment) {
+func CreateConfigMap(
+	ctx context.Context,
+	crudClient client.Client,
+	pgOperatorNamespace, configName string,
+	isOnline bool,
+) {
 	By("creating operator namespace", func() {
 		// Create a upgradeNamespace for all the resources
 		namespacedName := types.NamespacedName{
 			Name: pgOperatorNamespace,
 		}
 		namespaceResource := &corev1.Namespace{}
-		err := env.Client.Get(env.Ctx, namespacedName, namespaceResource)
+		err := crudClient.Get(ctx, namespacedName, namespaceResource)
 		if apierrors.IsNotFound(err) {
-			err = env.CreateNamespace(pgOperatorNamespace)
+			err = namespaces.CreateNamespace(ctx, crudClient, pgOperatorNamespace)
 			Expect(err).ToNot(HaveOccurred())
 		} else if err != nil {
 			Expect(err).ToNot(HaveOccurred())
@@ -61,19 +75,22 @@ func CreateOperatorConfigurationMap(pgOperatorNamespace, configName string, isOn
 		},
 		Data: map[string]string{"ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES": enable},
 	}
-	_, err := CreateObject(env, configMap)
+	_, err := objects.Create(ctx, crudClient, configMap)
 	Expect(err).NotTo(HaveOccurred())
 	})
}
 
-// InstallLatestCNPGOperator installs an operator version with the most recent release tag
-func InstallLatestCNPGOperator(releaseTag string, env *TestingEnvironment) {
+// InstallLatest installs an operator version with the most recent release tag
+func InstallLatest(
+	crudClient client.Client,
+	releaseTag string,
+) {
 	mostRecentReleasePath := "../../releases/cnpg-" + releaseTag + ".yaml"
 
 	Eventually(func() error {
 		GinkgoWriter.Printf("installing: %s\n", mostRecentReleasePath)
 
-		_, stderr, err := RunUnchecked("kubectl apply --server-side --force-conflicts -f " + mostRecentReleasePath)
+		_, stderr, err := run.Unchecked("kubectl apply --server-side --force-conflicts -f " + mostRecentReleasePath)
 		if err != nil {
 			GinkgoWriter.Printf("stderr: %s\n", stderr)
 		}
@@ -82,16 +99,16 @@ func InstallLatestCNPGOperator(releaseTag string, env *TestingEnvironment) {
 		return err
 	}, 60).ShouldNot(HaveOccurred())
 
 	Eventually(func() error {
-		_, _, err := RunUnchecked(
+		_, _, err := run.Unchecked(
 			"kubectl wait --for condition=established --timeout=60s " +
 				"crd/clusters.postgresql.cnpg.io")
 		return err
 	}, 150).ShouldNot(HaveOccurred())
 
 	Eventually(func() error {
-		mapping, err := env.Client.RESTMapper().RESTMapping(
-			schema.GroupKind{Group: apiv1.GroupVersion.Group, Kind: apiv1.ClusterKind},
-			apiv1.GroupVersion.Version)
+		mapping, err := crudClient.RESTMapper().RESTMapping(
+			schema.GroupKind{Group: apiv1.SchemeGroupVersion.Group, Kind: apiv1.ClusterKind},
+			apiv1.SchemeGroupVersion.Version)
 		if err != nil {
 			return err
 		}
@@ -102,7 +119,7 @@
 	}, 150).ShouldNot(HaveOccurred())
 
 	Eventually(func() error {
-		_, _, err := RunUnchecked(
+		_, _, err := run.Unchecked(
 			"kubectl wait --for=condition=Available --timeout=2m -n cnpg-system " +
 				"deployments cnpg-controller-manager")
 		return err
diff --git a/tests/utils/operator/webhooks.go b/tests/utils/operator/webhooks.go
new file mode 100644
index 0000000000..0e4ece0c95
--- /dev/null
+++ b/tests/utils/operator/webhooks.go
@@ -0,0 +1,214 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a
Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package operator
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+)
+
+// GetMutatingWebhookByName gets the MutatingWebhookConfiguration filtered by the name of one
+// of the webhooks
+func GetMutatingWebhookByName(
+	ctx context.Context,
+	crudClient client.Client,
+	name string,
+) (
+	*admissionregistrationv1.MutatingWebhookConfiguration, int, error,
+) {
+	var mWebhooks admissionregistrationv1.MutatingWebhookConfigurationList
+	err := objects.List(ctx, crudClient, &mWebhooks)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	for i, item := range mWebhooks.Items {
+		for i2, webhook := range item.Webhooks {
+			if webhook.Name == name {
+				return &mWebhooks.Items[i], i2, nil
+			}
+		}
+	}
+	return nil, 0, fmt.Errorf("mutating webhook not found")
+}
+
+// UpdateMutatingWebhookConf updates a MutatingWebhookConfiguration object
+func UpdateMutatingWebhookConf(
+	ctx context.Context,
+	kubeInterface kubernetes.Interface,
+	wh *admissionregistrationv1.MutatingWebhookConfiguration,
+) error {
+	_, err := kubeInterface.AdmissionregistrationV1().
+		MutatingWebhookConfigurations().Update(ctx, wh, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// getCNPGsValidatingWebhookConf gets the ValidatingWebhook linked to the operator
+func getCNPGsValidatingWebhookConf(
+	ctx context.Context,
+	crudClient client.Client,
+) (
+	*admissionregistrationv1.ValidatingWebhookConfiguration, error,
+) {
+	validatingWebhookConf := &admissionregistrationv1.ValidatingWebhookConfiguration{}
+	err := crudClient.Get(ctx, types.NamespacedName{Name: controller.ValidatingWebhookConfigurationName},
+		validatingWebhookConf)
+	return validatingWebhookConf, err
+}
+
+// GetValidatingWebhookByName gets the ValidatingWebhookConfiguration by the name of one
+// of the webhooks
+func GetValidatingWebhookByName(
+	ctx context.Context,
+	crudClient client.Client,
+	name string,
+) (
+	*admissionregistrationv1.ValidatingWebhookConfiguration, int, error,
+) {
+	var vWebhooks admissionregistrationv1.ValidatingWebhookConfigurationList
+	err := objects.List(ctx, crudClient, &vWebhooks)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	for i, item := range vWebhooks.Items {
+		for i2, webhook := range item.Webhooks {
+			if webhook.Name == name {
+				return &vWebhooks.Items[i], i2, nil
+			}
+		}
+	}
+	return nil, 0, fmt.Errorf("validating webhook not found")
+}
+
+// UpdateValidatingWebhookConf updates the ValidatingWebhook object
+func UpdateValidatingWebhookConf(
+	ctx context.Context,
+	kubeInterface kubernetes.Interface,
+	wh *admissionregistrationv1.ValidatingWebhookConfiguration,
+) error {
+	_, err := kubeInterface.AdmissionregistrationV1().
+		ValidatingWebhookConfigurations().Update(ctx, wh, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// checkWebhookSetup ensures that the operator has finished the webhook setup.
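+// It does so by comparing the CA bundle of every mutating and validating
+// webhook with the `tls.crt` key of the operator's webhook secret.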
+func checkWebhookSetup(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) error {
+	// Check CA
+	secret := &corev1.Secret{}
+	secretNamespacedName := types.NamespacedName{
+		Namespace: namespace,
+		Name:      controller.WebhookSecretName,
+	}
+	err := objects.Get(ctx, crudClient, secretNamespacedName, secret)
+	if err != nil {
+		return err
+	}
+
+	ca := secret.Data["tls.crt"]
+
+	mutatingWebhookConfig, err := getCNPGsMutatingWebhookConf(ctx, crudClient)
+	if err != nil {
+		return err
+	}
+
+	for _, webhook := range mutatingWebhookConfig.Webhooks {
+		if !bytes.Equal(webhook.ClientConfig.CABundle, ca) {
+			return fmt.Errorf("CA bundle in %v does not match secret %v: %v is not equal to %v",
+				controller.MutatingWebhookConfigurationName, secret.Name,
+				string(webhook.ClientConfig.CABundle), string(ca))
+		}
+	}
+
+	validatingWebhookConfig, err := getCNPGsValidatingWebhookConf(ctx, crudClient)
+	if err != nil {
+		return err
+	}
+
+	for _, webhook := range validatingWebhookConfig.Webhooks {
+		if !bytes.Equal(webhook.ClientConfig.CABundle, ca) {
+			return fmt.Errorf("secret does not match the CA bundle in %v",
+				controller.ValidatingWebhookConfigurationName)
+		}
+	}
+
+	return nil
+}
+
+// getCNPGsMutatingWebhookConf gets the MutatingWebhook linked to the operator
+func getCNPGsMutatingWebhookConf(
+	ctx context.Context,
+	crudClient client.Client,
+) (
+	*admissionregistrationv1.MutatingWebhookConfiguration, error,
+) {
+	mutatingWebhookConfiguration := &admissionregistrationv1.MutatingWebhookConfiguration{}
+	err := crudClient.Get(ctx, types.NamespacedName{Name: controller.MutatingWebhookConfigurationName},
+		mutatingWebhookConfiguration)
+	return mutatingWebhookConfiguration, err
+}
+
+// isWebhookWorking checks if the webhook denies an invalid request
+func isWebhookWorking(
+	ctx context.Context,
+	crudClient client.Client,
+) (bool, error) {
+	invalidCluster := &apiv1.Cluster{
+		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "invalid"},
+		Spec:       apiv1.ClusterSpec{Instances: 1},
+	}
+	_, err := objects.Create(
+		ctx,
+		crudClient,
+		invalidCluster,
+		&client.CreateOptions{DryRun: []string{metav1.DryRunAll}},
+	)
+	// If the error is not an invalid error, return false
+	if !errors.IsInvalid(err) {
+		return false, fmt.Errorf("expected invalid error, got: %v", err)
+	}
+	// If the error doesn't contain the expected message, return false
+	if !bytes.Contains([]byte(err.Error()), []byte("spec.storage.size")) {
+		return false, fmt.Errorf("expected error to contain 'spec.storage.size', got: %v", err)
+	}
+	return true, nil
+}
diff --git a/tests/utils/pod.go b/tests/utils/pod.go
deleted file mode 100644
index f62b4fcba1..0000000000
--- a/tests/utils/pod.go
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package utils - -import ( - "bytes" - "errors" - "fmt" - "io" - "regexp" - "strings" - "time" - - "github.com/avast/retry-go/v4" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// PodCreateAndWaitForReady creates a given pod object and wait for it to be ready -func PodCreateAndWaitForReady(env *TestingEnvironment, pod *corev1.Pod, timeoutSeconds uint) error { - _, err := CreateObject(env, pod) - if err != nil { - return err - } - return PodWaitForReady(env, pod, timeoutSeconds) -} - -// PodWaitForReady waits for a pod to be ready -func PodWaitForReady(env *TestingEnvironment, pod *corev1.Pod, timeoutSeconds uint) error { - err := retry.Do( - func() error { - if err := env.Client.Get(env.Ctx, client.ObjectKey{ - Namespace: pod.Namespace, - Name: pod.Name, - }, pod); err != nil { - return err - } - if !pkgutils.IsPodReady(*pod) { - return fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name) - } - return nil - }, - retry.Attempts(timeoutSeconds), - retry.Delay(time.Second), - retry.DelayType(retry.FixedDelay), - ) - return err -} - -// PodHasLabels verifies that the labels of a pod contain a specified -// labels map -func PodHasLabels(pod corev1.Pod, labels map[string]string) bool { - podLabels := pod.Labels - for k, v := range labels { - val, ok := podLabels[k] - if !ok || (v != val) { - return false - } - } - return true -} - -// PodHasAnnotations verifies that the annotations of a pod contain a specified -// annotations map -func PodHasAnnotations(pod corev1.Pod, annotations map[string]string) bool { - podAnnotations := pod.Annotations - for k, v := range annotations { - val, ok := podAnnotations[k] - if !ok || (v != val) { - return false - } - } - return true -} - -// PodHasCondition verifies that a pod has a specified condition -func PodHasCondition(pod *corev1.Pod, conditionType corev1.PodConditionType, status corev1.ConditionStatus) bool { - for _, cond := range pod.Status.Conditions { - if cond.Type == conditionType && cond.Status == status { - return true - } - } - return false -} - -// DeletePod deletes a pod if existent -func (env TestingEnvironment) DeletePod(namespace string, name string, opts ...client.DeleteOption) error { - u := &unstructured.Unstructured{} - u.SetName(name) - u.SetNamespace(namespace) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "Pod", - }) - - return DeleteObject(&env, u, opts...) 
-} - -// GetPodLogs gathers pod logs -func (env TestingEnvironment) GetPodLogs(namespace string, podName string) (string, error) { - req := env.Interface.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{}) - podLogs, err := req.Stream(env.Ctx) - if err != nil { - return "", err - } - defer func() { - innerErr := podLogs.Close() - if err == nil && innerErr != nil { - err = innerErr - } - }() - - // Create a buffer to hold JSON data - buf := new(bytes.Buffer) - _, err = io.Copy(buf, podLogs) - if err != nil { - return "", err - } - return buf.String(), nil -} - -// GetPodList gathers the current list of pods in a namespace -func (env TestingEnvironment) GetPodList(namespace string) (*corev1.PodList, error) { - podList := &corev1.PodList{} - err := GetObjectList( - &env, podList, client.InNamespace(namespace), - ) - return podList, err -} - -// GetManagerVersion returns the current manager version of a given pod -func GetManagerVersion(namespace, podName string) (string, error) { - out, _, err := RunUnchecked(fmt.Sprintf( - "kubectl -n %v exec %v -c postgres -- /controller/manager version", - namespace, - podName, - )) - if err != nil { - return "", err - } - versionRegexp := regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`) - ver := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))[1] - return ver, nil -} - -// GetPod gets a pod by namespace and name -func (env TestingEnvironment) GetPod(namespace, podName string) (*corev1.Pod, error) { - wrapErr := func(err error) error { - return fmt.Errorf("while getting pod '%s/%s': %w", namespace, podName, err) - } - podList, err := env.GetPodList(namespace) - if err != nil { - return nil, wrapErr(err) - } - for _, pod := range podList.Items { - if podName == pod.Name { - return &pod, nil - } - } - return nil, wrapErr(errors.New("pod not found")) -} - -// ContainerLocator contains the necessary data to find a container on a pod -type ContainerLocator struct { - Namespace string - PodName string - ContainerName string -} - -// ExecCommandInContainer executes commands in a given instance pod, in the -// postgres container -func (env TestingEnvironment) ExecCommandInContainer( - container ContainerLocator, - timeout *time.Duration, - command ...string, -) (string, string, error) { - wrapErr := func(err error) error { - return fmt.Errorf("while executing command in pod '%s/%s': %w", - container.Namespace, container.PodName, err) - } - pod, err := env.GetPod(container.Namespace, container.PodName) - if err != nil { - return "", "", wrapErr(err) - } - if !pkgutils.IsPodReady(*pod) { - return "", "", fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name) - } - return env.ExecCommand(env.Ctx, *pod, container.ContainerName, timeout, command...) -} - -// PodLocator contains the necessary data to find a pod -type PodLocator struct { - Namespace string - PodName string -} - -// ExecCommandInInstancePod executes commands in a given instance pod, in the -// postgres container -func (env TestingEnvironment) ExecCommandInInstancePod( - podLocator PodLocator, - timeout *time.Duration, - command ...string, -) (string, string, error) { - return env.ExecCommandInContainer( - ContainerLocator{ - Namespace: podLocator.Namespace, - PodName: podLocator.PodName, - ContainerName: specs.PostgresContainerName, - }, timeout, command...) 
-}
-
-// DatabaseName is a special type for the database argument in an Exec call
-type DatabaseName string
-
-// ExecQueryInInstancePod executes a query in an instance pod, by connecting to the pod
-// and the postgres container, and using a local connection with the postgres user
-func (env TestingEnvironment) ExecQueryInInstancePod(
-	podLocator PodLocator,
-	dbname DatabaseName,
-	query string,
-) (string, string, error) {
-	timeout := time.Second * 10
-	return env.ExecCommandInInstancePod(
-		PodLocator{
-			Namespace: podLocator.Namespace,
-			PodName:   podLocator.PodName,
-		}, &timeout, "psql", "-U", "postgres", string(dbname), "-tAc", query)
-}
diff --git a/tests/utils/pods/pod.go b/tests/utils/pods/pod.go
new file mode 100644
index 0000000000..1021655e54
--- /dev/null
+++ b/tests/utils/pods/pod.go
@@ -0,0 +1,197 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package pods provides pod utilities to manage pods inside K8s
+package pods
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/avast/retry-go/v4"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+)
+
+// List gathers the current list of pods in a namespace
+func List(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*v1.PodList, error) {
+	podList := &v1.PodList{}
+	err := objects.List(
+		ctx, crudClient, podList, client.InNamespace(namespace),
+	)
+	return podList, err
+}
+
+// Delete deletes a pod, if it exists
+func Delete(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, name string,
+	opts ...client.DeleteOption,
+) error {
+	u := &unstructured.Unstructured{}
+	u.SetName(name)
+	u.SetNamespace(namespace)
+	u.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "",
+		Version: "v1",
+		Kind:    "Pod",
+	})
+
+	return objects.Delete(ctx, crudClient, u, opts...)
+}
+
+// CreateAndWaitForReady creates a given pod object and waits for it to be ready
+func CreateAndWaitForReady(
+	ctx context.Context,
+	crudClient client.Client,
+	pod *v1.Pod,
+	timeoutSeconds uint,
+) error {
+	_, err := objects.Create(ctx, crudClient, pod)
+	if err != nil {
+		return err
+	}
+	return waitForReady(ctx, crudClient, pod, timeoutSeconds)
+}
+
+// waitForReady waits for a pod to be ready
+func waitForReady(
+	ctx context.Context,
+	crudClient client.Client,
+	pod *v1.Pod,
+	timeoutSeconds uint,
+) error {
+	err := retry.Do(
+		func() error {
+			if err := crudClient.Get(ctx, client.ObjectKey{
+				Namespace: pod.Namespace,
+				Name:      pod.Name,
+			}, pod); err != nil {
+				return err
+			}
+			if !utils.IsPodReady(*pod) {
Namespace: %v, Name: %v", pod.Namespace, pod.Name) + } + return nil + }, + retry.Attempts(timeoutSeconds), + retry.Delay(time.Second), + retry.DelayType(retry.FixedDelay), + ) + return err +} + +// Logs gathers pod logs +func Logs( + ctx context.Context, + kubeInterface kubernetes.Interface, + namespace, podName string, +) (string, error) { + req := kubeInterface.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{}) + podLogs, err := req.Stream(ctx) + if err != nil { + return "", err + } + defer func() { + innerErr := podLogs.Close() + if err == nil && innerErr != nil { + err = innerErr + } + }() + + // Create a buffer to hold JSON data + buf := new(bytes.Buffer) + _, err = io.Copy(buf, podLogs) + if err != nil { + return "", err + } + return buf.String(), nil +} + +// Get gets a pod by namespace and name +func Get( + ctx context.Context, + crudClient client.Client, + namespace, podName string, +) (*v1.Pod, error) { + wrapErr := func(err error) error { + return fmt.Errorf("while getting pod '%s/%s': %w", namespace, podName, err) + } + podList, err := List(ctx, crudClient, namespace) + if err != nil { + return nil, wrapErr(err) + } + for _, pod := range podList.Items { + if podName == pod.Name { + return &pod, nil + } + } + return nil, wrapErr(errors.New("pod not found")) +} + +// HasLabels verifies that the labels of a pod contain a specified +// labels map +func HasLabels(pod v1.Pod, labels map[string]string) bool { + podLabels := pod.Labels + for k, v := range labels { + val, ok := podLabels[k] + if !ok || (v != val) { + return false + } + } + return true +} + +// HasAnnotations verifies that the annotations of a pod contain a specified +// annotations map +func HasAnnotations(pod v1.Pod, annotations map[string]string) bool { + podAnnotations := pod.Annotations + for k, v := range annotations { + val, ok := podAnnotations[k] + if !ok || (v != val) { + return false + } + } + return true +} + +// HasCondition verifies that a pod has a specified condition +func HasCondition(pod *v1.Pod, conditionType v1.PodConditionType, status v1.ConditionStatus) bool { + for _, cond := range pod.Status.Conditions { + if cond.Type == conditionType && cond.Status == status { + return true + } + } + return false +} diff --git a/tests/utils/postgres.go b/tests/utils/postgres.go deleted file mode 100644 index e85f1bf573..0000000000 --- a/tests/utils/postgres.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" -) - -const ( - // PGLocalSocketDir is the directory containing the PostgreSQL local socket - PGLocalSocketDir = "/controller/run" - // AppUser for app user - AppUser = "app" - // PostgresUser for postgres user - PostgresUser = "postgres" - // AppDBName database name app - AppDBName = "app" - // PostgresDBName database name postgres - PostgresDBName = "postgres" -) - -// RunQueryFromPod executes a query from a pod to a host -func RunQueryFromPod( - connectingPod *corev1.Pod, - host string, - dbname string, - user string, - password string, - query string, - env *TestingEnvironment, -) (string, string, error) { - timeout := time.Second * 10 - dsn := CreateDSN(host, user, dbname, password, Prefer, 5432) - - stdout, stderr, err := env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", query) - return stdout, stderr, err -} - -// CountReplicas counts the number of replicas attached to an instance -func CountReplicas(env *TestingEnvironment, pod *corev1.Pod) (int, error) { - query := "SELECT count(*) FROM pg_stat_replication" - commandTimeout := time.Second * 10 - stdOut, _, err := env.EventuallyExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", query) - if err != nil { - return 0, nil - } - return strconv.Atoi(strings.Trim(stdOut, "\n")) -} diff --git a/tests/utils/postgres/doc.go b/tests/utils/postgres/doc.go new file mode 100644 index 0000000000..622dd658f3 --- /dev/null +++ b/tests/utils/postgres/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package postgres provides functions to handle postgres in cnpg clusters +package postgres diff --git a/tests/utils/postgres/postgres.go b/tests/utils/postgres/postgres.go new file mode 100644 index 0000000000..3c081fccc2 --- /dev/null +++ b/tests/utils/postgres/postgres.go @@ -0,0 +1,154 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package postgres
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/cloudnative-pg/machinery/pkg/image/reference"
+	"github.com/cloudnative-pg/machinery/pkg/postgres/version"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+)
+
+const (
+	// PGLocalSocketDir is the directory containing the PostgreSQL local socket
+	PGLocalSocketDir = "/controller/run"
+	// AppUser for app user
+	AppUser = "app"
+	// PostgresUser for postgres user
+	PostgresUser = "postgres"
+	// AppDBName database name app
+	AppDBName = "app"
+	// PostgresDBName database name postgres
+	PostgresDBName = "postgres"
+	// TablespaceDefaultName is the default tablespace location
+	TablespaceDefaultName = "pg_default"
+)
+
+// CountReplicas counts the number of replicas attached to an instance
+func CountReplicas(
+	ctx context.Context,
+	crudClient client.Client,
+	kubeInterface kubernetes.Interface,
+	restConfig *rest.Config,
+	pod *corev1.Pod,
+	retryTimeout int,
+) (int, error) {
+	query := "SELECT count(*) FROM pg_catalog.pg_stat_replication"
+	stdOut, _, err := exec.EventuallyExecQueryInInstancePod(
+		ctx, crudClient, kubeInterface, restConfig,
+		exec.PodLocator{
+			Namespace: pod.Namespace,
+			PodName:   pod.Name,
+		}, AppDBName,
+		query,
+		retryTimeout,
+		objects.PollingTime,
+	)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.Atoi(strings.Trim(stdOut, "\n"))
+}
+
+// GetCurrentTimestamp gets the current timestamp from the postgres server
+func GetCurrentTimestamp(
+	ctx context.Context,
+	crudClient client.Client,
+	kubeInterface kubernetes.Interface,
+	restConfig *rest.Config,
+	namespace, clusterName string,
+) (string, error) {
+	row, err := RunQueryRowOverForward(
+		ctx,
+		crudClient,
+		kubeInterface,
+		restConfig,
+		namespace,
+		clusterName,
+		AppDBName,
+		v1.ApplicationUserSecretSuffix,
+		"select TO_CHAR(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS.US');",
+	)
+	if err != nil {
+		return "", err
+	}
+
+	var currentTimestamp string
+	if err = row.Scan(&currentTimestamp); err != nil {
+		return "", err
+	}
+
+	return currentTimestamp, nil
+}
+
+// BumpPostgresImageMajorVersion returns a postgresImage incrementing the major version of the argument (if available)
+func BumpPostgresImageMajorVersion(postgresImage string) (string, error) {
+	imageReference := reference.New(postgresImage)
+
+	postgresImageVersion, err := version.FromTag(imageReference.Tag)
+	if err != nil {
+		return "", err
+	}
+
+	targetPostgresImageMajorVersionInt := postgresImageVersion.Major() + 1
+
+	defaultImageVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag)
+	if err != nil {
+		return "", err
+	}
+
+	if targetPostgresImageMajorVersionInt >= defaultImageVersion.Major() {
+		return postgresImage, nil
+	}
+
+	imageReference.Tag = fmt.Sprintf("%d", targetPostgresImageMajorVersionInt)
+
+	return imageReference.GetNormalizedName(), nil
+}
+
+// IsLatestMajor returns true if the given postgresImage is using the latest Postgres major version
+func IsLatestMajor(postgresImage string) bool {
+	// Get the current tag
+	currentImageReference := reference.New(postgresImage)
+	currentImageVersion, err := version.FromTag(currentImageReference.Tag)
+	if err != nil {
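+		// The tag could not be parsed as a PostgreSQL version, so no comparison
+		// with the default image is possible: conservatively report "not latest".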
return false + } + // Get the default tag + defaultImageReference := reference.New(versions.DefaultImageName) + defaultImageVersion, err := version.FromTag(defaultImageReference.Tag) + if err != nil { + return false + } + + return currentImageVersion.Major() >= defaultImageVersion.Major() +} diff --git a/tests/utils/version_test.go b/tests/utils/postgres/postgres_test.go similarity index 91% rename from tests/utils/version_test.go rename to tests/utils/postgres/postgres_test.go index 64c7ca13e8..db6f72b272 100644 --- a/tests/utils/version_test.go +++ b/tests/utils/postgres/postgres_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +package postgres import ( "bytes" diff --git a/tests/utils/postgres/psql_connection.go b/tests/utils/postgres/psql_connection.go new file mode 100644 index 0000000000..ecd7980899 --- /dev/null +++ b/tests/utils/postgres/psql_connection.go @@ -0,0 +1,275 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package postgres
+
+import (
+	"context"
+	"database/sql"
+	"io"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/portforward"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/configfile"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/forwardconnection"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+)
+
+// PSQLForwardConnection manages the creation of a port-forward to open a new database connection
+type PSQLForwardConnection struct {
+	pooler      pool.Pooler
+	portForward *portforward.PortForwarder
+}
+
+// Close stops the port-forwarding
+func (psqlc *PSQLForwardConnection) Close() {
+	psqlc.portForward.Close()
+}
+
+// GetPooler returns the connection Pooler
+func (psqlc *PSQLForwardConnection) GetPooler() pool.Pooler {
+	return psqlc.pooler
+}
+
+// createConnectionParameters returns a map of the parameters required to perform a connection
+func createConnectionParameters(user, password, localPort string) map[string]string {
+	return map[string]string{
+		"host":     "localhost",
+		"port":     localPort,
+		"user":     user,
+		"password": password,
+	}
+}
+
+func startForwardConnection(
+	dialer httpstream.Dialer,
+	portMap []string,
+	dbname,
+	userApp,
+	passApp string,
+) (*PSQLForwardConnection, *sql.DB, error) {
+	forwarder, err := forwardconnection.NewForwardConnection(
+		dialer,
+		portMap,
+		io.Discard,
+		io.Discard,
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err = forwarder.StartAndWait(); err != nil {
+		return nil, nil, err
+	}
+
+	localPort, err := forwarder.GetLocalPort()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	connParameters := createConnectionParameters(userApp, passApp, localPort)
+
+	pooler := pool.NewPgbouncerConnectionPool(configfile.CreateConnectionString(connParameters))
+
+	conn, err := pooler.Connection(dbname)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	conn.SetMaxOpenConns(10)
+	conn.SetMaxIdleConns(10)
+	conn.SetConnMaxLifetime(time.Hour)
+	conn.SetConnMaxIdleTime(time.Hour)
+
+	return &PSQLForwardConnection{
+		portForward: forwarder.Forwarder,
+		pooler:      pooler,
+	}, conn, err
+}
+
+// ForwardPSQLConnection simplifies the creation of a forwarded connection to a PostgreSQL cluster
+func ForwardPSQLConnection(
+	ctx context.Context,
+	crudClient client.Client,
+	kubeInterface kubernetes.Interface,
+	restConfig *rest.Config,
+	namespace,
+	clusterName,
+	dbname,
+	secretSuffix string,
+) (*PSQLForwardConnection, *sql.DB, error) {
+	user, pass, err := secrets.GetCredentials(ctx, crudClient, clusterName, namespace, secretSuffix)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return ForwardPSQLConnectionWithCreds(
+		ctx,
+		crudClient,
+		kubeInterface,
+		restConfig,
+		namespace, clusterName, dbname, user, pass,
+	)
+}
+
+// ForwardPSQLConnectionWithCreds creates a forwarded connection to a PostgreSQL cluster
+// using the given credentials
+func ForwardPSQLConnectionWithCreds(
+	ctx context.Context,
+	crudClient client.Client,
+	kubeInterface kubernetes.Interface,
+	restConfig *rest.Config,
+	namespace,
+	clusterName,
+	dbname,
+	userApp,
+	passApp string,
+) (*PSQLForwardConnection, *sql.DB, error) {
+	cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
+	if err != nil {
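+		// Without the Cluster resource we cannot look up Status.CurrentPrimary,
+		// which is the pod the port-forward dialer below targets.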
return nil, nil, err + } + + dialer, err := forwardconnection.NewDialer( + kubeInterface, + restConfig, + namespace, + cluster.Status.CurrentPrimary, + ) + if err != nil { + return nil, nil, err + } + + psqlForwardConn, conn, err := startForwardConnection( + dialer, + []string{forwardconnection.PostgresPortMap}, + dbname, + userApp, + passApp, + ) + if err != nil { + return nil, nil, err + } + + return psqlForwardConn, conn, err +} + +// ForwardPSQLServiceConnection creates a forwarded connection to a PostgreSQL service +// using the given credentials +func ForwardPSQLServiceConnection( + ctx context.Context, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, + serviceName, + dbname, + userApp, + passApp string, +) (*PSQLForwardConnection, *sql.DB, error) { + dialer, portMap, err := forwardconnection.NewDialerFromService( + ctx, + kubeInterface, + restConfig, + namespace, + serviceName, + ) + if err != nil { + return nil, nil, err + } + + psqlForwardConn, conn, err := startForwardConnection(dialer, portMap, dbname, userApp, passApp) + if err != nil { + return nil, nil, err + } + + return psqlForwardConn, conn, err +} + +// RunQueryRowOverForward runs QueryRow with a given query, returning the Row of the SQL command +func RunQueryRowOverForward( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, + clusterName, + dbname, + secretSuffix, + query string, +) (*sql.Row, error) { + forward, conn, err := ForwardPSQLConnection( + ctx, + crudClient, + kubeInterface, + restConfig, + namespace, + clusterName, + dbname, + secretSuffix, + ) + if err != nil { + return nil, err + } + defer func() { + _ = conn.Close() + forward.Close() + }() + + return conn.QueryRow(query), nil +} + +// RunExecOverForward runs Exec with a given query, returning the Result of the SQL command +func RunExecOverForward( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, + clusterName, + dbname, + secretSuffix, + query string, +) (sql.Result, error) { + forward, conn, err := ForwardPSQLConnection( + ctx, + crudClient, + kubeInterface, + restConfig, + namespace, + clusterName, + dbname, + secretSuffix, + ) + if err != nil { + return nil, err + } + defer func() { + _ = conn.Close() + forward.Close() + }() + + return conn.Exec(query) +} diff --git a/tests/utils/postgres/suite_test.go b/tests/utils/postgres/suite_test.go new file mode 100644 index 0000000000..703df68d4c --- /dev/null +++ b/tests/utils/postgres/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package postgres + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestUtils(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Utils test postgres suite") +} diff --git a/tests/utils/proxy.go b/tests/utils/proxy/proxy.go similarity index 61% rename from tests/utils/proxy.go rename to tests/utils/proxy/proxy.go index d17b477eb4..0a303d2e61 100644 --- a/tests/utils/proxy.go +++ b/tests/utils/proxy/proxy.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,20 +13,32 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +// Package proxy provides functions to use the proxy subresource to call a pod +package proxy import ( + "context" "strconv" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" ) // runProxyRequest makes a GET call on the pod interface proxy, and returns the raw response -func runProxyRequest(env *TestingEnvironment, pod *corev1.Pod, tlsEnabled bool, path string, port int) ([]byte, error) { +func runProxyRequest( + ctx context.Context, + kubeInterface kubernetes.Interface, + pod *corev1.Pod, + tlsEnabled bool, + path string, + port int, +) ([]byte, error) { portString := strconv.Itoa(port) schema := "http" @@ -33,40 +46,43 @@ func runProxyRequest(env *TestingEnvironment, pod *corev1.Pod, tlsEnabled bool, schema = "https" } - req := env.Interface.CoreV1().Pods(pod.Namespace).ProxyGet( + req := kubeInterface.CoreV1().Pods(pod.Namespace).ProxyGet( schema, pod.Name, portString, path, map[string]string{}) - return req.DoRaw(env.Ctx) + return req.DoRaw(ctx) } // RetrieveMetricsFromInstance aims to retrieve the metrics from a PostgreSQL instance pod // using a GET request on the pod interface proxy func RetrieveMetricsFromInstance( - env *TestingEnvironment, + ctx context.Context, + kubeInterface kubernetes.Interface, pod corev1.Pod, tlsEnabled bool, ) (string, error) { - body, err := runProxyRequest(env, &pod, tlsEnabled, url.PathMetrics, int(url.PostgresMetricsPort)) + body, err := runProxyRequest(ctx, kubeInterface, &pod, tlsEnabled, url.PathMetrics, int(url.PostgresMetricsPort)) return string(body), err } // RetrieveMetricsFromPgBouncer aims to retrieve the metrics from a PgBouncer pod // using a GET request on the pod interface proxy func RetrieveMetricsFromPgBouncer( - env *TestingEnvironment, + ctx context.Context, + kubeInterface kubernetes.Interface, pod corev1.Pod, ) (string, error) { - body, err := runProxyRequest(env, &pod, false, url.PathMetrics, int(url.PgBouncerMetricsPort)) + body, err := runProxyRequest(ctx, kubeInterface, &pod, false, url.PathMetrics, int(url.PgBouncerMetricsPort)) return string(body), err } // RetrievePgStatusFromInstance aims to retrieve the pgStatus from a PostgreSQL instance pod // using a GET request on the pod interface proxy func RetrievePgStatusFromInstance( - env *TestingEnvironment, + ctx context.Context, + kubeInterface kubernetes.Interface, pod corev1.Pod, tlsEnabled bool, ) (string, error) { - body, err := runProxyRequest(env, &pod, tlsEnabled, url.PathPgStatus, int(url.StatusPort)) + body, err := 
runProxyRequest(ctx, kubeInterface, &pod, tlsEnabled, url.PathPgStatus, int(url.StatusPort)) return string(body), err } diff --git a/tests/utils/psql_client.go b/tests/utils/psql_client.go deleted file mode 100644 index b64f03dd34..0000000000 --- a/tests/utils/psql_client.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" -) - -// GetPsqlClient gets a psql client pod for service connectivity -func GetPsqlClient(namespace string, env *TestingEnvironment) (*corev1.Pod, error) { - _ = corev1.AddToScheme(env.Scheme) - _ = appsv1.AddToScheme(env.Scheme) - pod := &corev1.Pod{} - err := env.CreateNamespace(namespace) - if err != nil { - return pod, err - } - pod, err = createPsqlClient(namespace, env) - if err != nil { - return pod, err - } - err = PodWaitForReady(env, pod, 300) - if err != nil { - return pod, err - } - return pod, nil -} - -// createPsqlClient creates a psql client -func createPsqlClient(namespace string, env *TestingEnvironment) (*corev1.Pod, error) { - seccompProfile := &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - } - - psqlPod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - // The pod name follows a convention: "psql-client-0", derived from the StatefulSet name. - Name: "psql-client-0", - Labels: map[string]string{"run": "psql"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: specs.PostgresContainerName, - Image: versions.DefaultImageName, - // override the default Entrypoint ("docker-entrypoint.sh") of the image - Command: []string{"bash", "-c"}, - // override the default Cmd ("postgres") of the image - // sleep enough time to keep the pod running until we finish the E2E tests - Args: []string{"sleep 7200"}, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - SeccompProfile: seccompProfile, - }, - }, - }, - DNSPolicy: corev1.DNSClusterFirst, - RestartPolicy: corev1.RestartPolicyAlways, - SecurityContext: &corev1.PodSecurityContext{ - SeccompProfile: seccompProfile, - }, - }, - } - - // The psql pod might be deleted by, for example, a node drain. As such we need to use - // either a StatefulSet or a Deployment to make sure the pod is always getting recreated. - // To avoid having to reference a new random name created by the Deployment each time the - // pod gets recreated, we choose to use a StatefulSet. 
- psqlStatefulSet := appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "psql-client", - Labels: map[string]string{"run": "psql"}, - }, - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"run": "psql"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: psqlPod.ObjectMeta, - Spec: psqlPod.Spec, - }, - }, - } - - err := env.Client.Create(env.Ctx, &psqlStatefulSet) - if err != nil { - return &corev1.Pod{}, err - } - - return psqlPod, nil -} diff --git a/tests/utils/replication_slots.go b/tests/utils/replicationslot/replication_slots.go similarity index 60% rename from tests/utils/replication_slots.go rename to tests/utils/replicationslot/replication_slots.go index 9279a01ce1..ad78f4a04c 100644 --- a/tests/utils/replication_slots.go +++ b/tests/utils/replicationslot/replication_slots.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,38 +13,52 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +// Package replicationslot provides functions to manage the replication slot of a +// cnpg cluster +package replicationslot import ( + "context" "fmt" "sort" "strings" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/utils/ptr" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" ) // PrintReplicationSlots prints replications slots with their restart_lsn func PrintReplicationSlots( - namespace, - clusterName string, - env *TestingEnvironment, + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, clusterName, dbName string, ) string { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName) if err != nil { return fmt.Sprintf("Couldn't retrieve the cluster's podlist: %v\n", err) } var output strings.Builder for i, pod := range podList.Items { - slots, err := GetReplicationSlotsOnPod(namespace, pod.GetName(), env) + slots, err := GetReplicationSlotsOnPod( + ctx, crudClient, kubeInterface, restConfig, + namespace, pod.GetName(), dbName, + ) if err != nil { return fmt.Sprintf("Couldn't retrieve slots for pod %v: %v\n", pod.GetName(), err) } @@ -54,13 +69,15 @@ func PrintReplicationSlots( } m := make(map[string]string) for _, slot := range slots { - restartLsn, _, err := RunQueryFromPod( - &podList.Items[i], PGLocalSocketDir, - "app", - "postgres", - "''", - fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", slot), - env) + query := fmt.Sprintf("SELECT restart_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '%v'", 
slot) + restartLsn, _, err := exec.QueryInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + exec.PodLocator{ + Namespace: podList.Items[i].Namespace, + PodName: podList.Items[i].Name, + }, + exec.DatabaseName(dbName), + query) if err != nil { output.WriteString(fmt.Sprintf("Couldn't retrieve restart_lsn for slot %v: %v\n", slot, err)) } @@ -88,15 +105,16 @@ func AreSameLsn(lsnList []string) bool { // GetExpectedHAReplicationSlotsOnPod returns a slice of replication slot names which should be present // in a given pod func GetExpectedHAReplicationSlotsOnPod( + ctx context.Context, + crudClient client.Client, namespace, clusterName, podName string, - env *TestingEnvironment, ) ([]string, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName) if err != nil { return nil, err } - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return nil, err } @@ -114,20 +132,32 @@ func GetExpectedHAReplicationSlotsOnPod( // GetReplicationSlotsOnPod returns a slice containing the names of the current replication slots present in // a given pod -func GetReplicationSlotsOnPod(namespace, podName string, env *TestingEnvironment) ([]string, error) { +func GetReplicationSlotsOnPod( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, podName, dbName string, +) ([]string, error) { namespacedName := types.NamespacedName{ Namespace: namespace, Name: podName, } targetPod := &corev1.Pod{} - err := env.Client.Get(env.Ctx, namespacedName, targetPod) + err := crudClient.Get(ctx, namespacedName, targetPod) if err != nil { return nil, err } - stdout, _, err := RunQueryFromPod(targetPod, PGLocalSocketDir, - "app", "postgres", "''", - "SELECT slot_name FROM pg_replication_slots WHERE temporary = 'f' AND slot_type = 'physical'", env) + query := "SELECT slot_name FROM pg_catalog.pg_replication_slots WHERE temporary = 'f' AND slot_type = 'physical'" + stdout, _, err := exec.QueryInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + exec.PodLocator{ + Namespace: targetPod.Namespace, + PodName: targetPod.Name, + }, + exec.DatabaseName(dbName), + query) if err != nil { return nil, err } @@ -144,21 +174,30 @@ func GetReplicationSlotsOnPod(namespace, podName string, env *TestingEnvironment // GetReplicationSlotLsnsOnPod returns a slice containing the current restart_lsn values of each // replication slot present in a given pod func GetReplicationSlotLsnsOnPod( - namespace, clusterName string, + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, clusterName, dbName string, pod corev1.Pod, - env *TestingEnvironment, ) ([]string, error) { - slots, err := GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env) + slots, err := GetExpectedHAReplicationSlotsOnPod(ctx, crudClient, namespace, clusterName, pod.GetName()) if err != nil { return nil, err } lsnList := make([]string, 0, len(slots)) for _, slot := range slots { - query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", + query := fmt.Sprintf("SELECT restart_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '%v'", slot) - restartLsn, _, err := RunQueryFromPod(&pod, PGLocalSocketDir, - "app", "postgres", "''", query, env) + restartLsn, _, err := 
exec.QueryInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + exec.DatabaseName(dbName), + query) if err != nil { return nil, err } @@ -168,8 +207,13 @@ func GetReplicationSlotLsnsOnPod( } // ToggleHAReplicationSlots sets the HA Replication Slot feature on/off depending on `enable` -func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *TestingEnvironment) error { - cluster, err := env.GetCluster(namespace, clusterName) +func ToggleHAReplicationSlots( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, + enable bool, +) error { + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return err } @@ -183,7 +227,7 @@ func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *T } clusterToggle.Spec.ReplicationSlots.HighAvailability.Enabled = ptr.To(enable) - err = env.Client.Patch(env.Ctx, clusterToggle, ctrlclient.MergeFrom(cluster)) + err = crudClient.Patch(ctx, clusterToggle, client.MergeFrom(cluster)) if err != nil { return err } @@ -191,8 +235,13 @@ func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *T } // ToggleSynchronizeReplicationSlots sets the Synchronize Replication Slot feature on/off depending on `enable` -func ToggleSynchronizeReplicationSlots(namespace, clusterName string, enable bool, env *TestingEnvironment) error { - cluster, err := env.GetCluster(namespace, clusterName) +func ToggleSynchronizeReplicationSlots( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, + enable bool, +) error { + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return err } @@ -206,7 +255,7 @@ func ToggleSynchronizeReplicationSlots(namespace, clusterName string, enable boo } clusterToggle.Spec.ReplicationSlots.SynchronizeReplicas.Enabled = ptr.To(enable) - err = env.Client.Patch(env.Ctx, clusterToggle, ctrlclient.MergeFrom(cluster)) + err = crudClient.Patch(ctx, clusterToggle, client.MergeFrom(cluster)) if err != nil { return err } diff --git a/tests/utils/run.go b/tests/utils/run/run.go similarity index 69% rename from tests/utils/run.go rename to tests/utils/run/run.go index 7ae3092cbd..dd57c64498 100644 --- a/tests/utils/run.go +++ b/tests/utils/run/run.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package utils
+// Package run contains functions to execute commands locally
+package run
 
 import (
 	"bytes"
@@ -26,10 +30,12 @@ import (
 	"github.com/avast/retry-go/v4"
 	"github.com/google/shlex"
 	"github.com/onsi/ginkgo/v2"
+
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
 )
 
-// RunUnchecked executes a command and process the information
-func RunUnchecked(command string) (stdout string, stderr string, err error) {
+// Unchecked executes a command and processes the information
+func Unchecked(command string) (stdout string, stderr string, err error) {
 	tokens, err := shlex.Split(command)
 	if err != nil {
 		ginkgo.GinkgoWriter.Printf("Error parsing command `%v`: %v\n", command, err)
@@ -48,8 +54,8 @@ func RunUnchecked(command string) (stdout string, stderr string, err error) {
 	return
 }
 
-// RunUncheckedRetry executes a command and process the information with retry
-func RunUncheckedRetry(command string) (stdout string, stderr string, err error) {
+// UncheckedRetry executes a command and processes the information, with retry
+func UncheckedRetry(command string) (stdout string, stderr string, err error) {
 	var tokens []string
 	tokens, err = shlex.Split(command)
 	if err != nil {
@@ -64,8 +70,8 @@ func RunUncheckedRetry(command string) (stdout string, stderr string, err error)
 			cmd.Stdout, cmd.Stderr = &outBuffer, &errBuffer
 			return cmd.Run()
 		},
-		retry.Delay(PollingTime*time.Second),
-		retry.Attempts(RetryAttempts),
+		retry.Delay(objects.PollingTime*time.Second),
+		retry.Attempts(objects.RetryAttempts),
 		retry.DelayType(retry.FixedDelay),
 	)
 	stdout = outBuffer.String()
@@ -78,19 +84,7 @@ func RunUncheckedRetry(command string) (stdout string, stderr string, err error)
 
 // Run executes a command and prints the output when terminates with an error
 func Run(command string) (stdout string, stderr string, err error) {
-	stdout, stderr, err = RunUnchecked(command)
-
-	var exerr *exec.ExitError
-	if errors.As(err, &exerr) {
-		ginkgo.GinkgoWriter.Printf("RunCheck: %v\nExitCode: %v\n Out:\n%v\nErr:\n%v\n",
-			command, exerr.ExitCode(), stdout, stderr)
-	}
-	return
-}
-
-// RunRetry executes a command with retry and prints the output when terminates with an error
-func RunRetry(command string) (stdout string, stderr string, err error) {
-	stdout, stderr, err = RunUncheckedRetry(command)
+	stdout, stderr, err = Unchecked(command)
 
 	var exerr *exec.ExitError
 	if errors.As(err, &exerr) {
diff --git a/tests/utils/secrets.go b/tests/utils/secrets/secrets.go
similarity index 62%
rename from tests/utils/secrets.go
rename to tests/utils/secrets/secrets.go
index f45c2b1cb9..7cda99e028 100644
--- a/tests/utils/secrets.go
+++ b/tests/utils/secrets/secrets.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,29 +13,36 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ -package utils +// Package secrets provides functions to manage and handle secrets +package secrets import ( + "context" "fmt" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" ) // CreateSecretCA generates a CA for the cluster and return the cluster and the key pair func CreateSecretCA( - namespace string, - clusterName string, - caSecName string, + ctx context.Context, + crudClient client.Client, + namespace, clusterName, caSecName string, includeCAPrivateKey bool, - env *TestingEnvironment) ( +) ( *apiv1.Cluster, *certs.KeyPair, error, ) { // creating root CA certificates @@ -42,7 +50,7 @@ func CreateSecretCA( cluster.Namespace = namespace cluster.Name = clusterName secret := &corev1.Secret{} - err := env.Client.Get(env.Ctx, client.ObjectKey{Namespace: namespace, Name: caSecName}, secret) + err := crudClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: caSecName}, secret) if !apierrors.IsNotFound(err) { return cluster, nil, err } @@ -57,7 +65,7 @@ func CreateSecretCA( if !includeCAPrivateKey { delete(caSecret.Data, certs.CAPrivateKeyKey) } - _, err = CreateObject(env, caSecret) + _, err = objects.Create(ctx, crudClient, caSecret) if err != nil { return cluster, caPair, err } @@ -66,13 +74,14 @@ func CreateSecretCA( // GetCredentials retrieve username and password from secrets and return it as per user suffix func GetCredentials( - clusterName, namespace string, - secretSuffix string, - env *TestingEnvironment) ( + ctx context.Context, + crudClient client.Client, + clusterName, namespace, secretSuffix string, +) ( string, string, error, ) { // Get the cluster - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return "", "", err } @@ -93,7 +102,7 @@ func GetCredentials( Namespace: namespace, Name: secretName, } - err = env.Client.Get(env.Ctx, secretNamespacedName, secret) + err = crudClient.Get(ctx, secretNamespacedName, secret) if err != nil { return "", "", err } @@ -101,3 +110,29 @@ func GetCredentials( password := string(secret.Data["password"]) return username, password, nil } + +// CreateObjectStorageSecret generates an Opaque Secret with a given ID and Key +func CreateObjectStorageSecret( + ctx context.Context, + crudClient client.Client, + namespace, secretName string, + id, key string, +) (*corev1.Secret, error) { + targetSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + StringData: map[string]string{ + "ID": id, + "KEY": key, + }, + Type: corev1.SecretTypeOpaque, + } + obj, err := objects.Create(ctx, crudClient, targetSecret) + if err != nil { + return nil, err + } + + return obj.(*corev1.Secret), nil +} diff --git a/tests/utils/service.go b/tests/utils/services/service.go similarity index 73% rename from tests/utils/service.go rename to tests/utils/services/service.go index e569011b22..b7bfda0f2c 100644 --- a/tests/utils/service.go +++ b/tests/utils/services/service.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as 
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,15 +13,20 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package utils
+// Package services provides functions to manage services inside K8s
+package services
 
 import (
+	"context"
 	"fmt"
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 )
@@ -45,15 +51,19 @@ func GetReadWriteServiceName(clusterName string) string {
 	return fmt.Sprintf("%v%v", clusterName, apiv1.ServiceReadWriteSuffix)
 }
 
-// GetRwServiceObject return read write service object
-func GetRwServiceObject(namespace, clusterName string, env *TestingEnvironment) (*corev1.Service, error) {
+// getRwServiceObject returns the read write service object
+func getRwServiceObject(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (*corev1.Service, error) {
 	svcName := GetReadWriteServiceName(clusterName)
 	service := &corev1.Service{}
 	namespacedName := types.NamespacedName{
 		Namespace: namespace,
 		Name:      svcName,
 	}
-	err := env.Client.Get(env.Ctx, namespacedName, service)
+	err := crudClient.Get(ctx, namespacedName, service)
 	if err != nil {
 		return service, err
 	}
@@ -68,8 +78,12 @@ func CreateDSN(host, user, dbname, password string, sslmode SSLMode, port int) s
 }
 
 // GetHostName return fully qualified domain name for read write service
-func GetHostName(namespace, clusterName string, env *TestingEnvironment) (string, error) {
-	rwService, err := GetRwServiceObject(namespace, clusterName, env)
+func GetHostName(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (string, error) {
+	rwService, err := getRwServiceObject(ctx, crudClient, namespace, clusterName)
 	if err != nil {
 		return "", err
 	}
diff --git a/tests/utils/sternmultitailer/doc.go b/tests/utils/sternmultitailer/doc.go
index fa5b323e49..854aac8bb9 100644
--- a/tests/utils/sternmultitailer/doc.go
+++ b/tests/utils/sternmultitailer/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package sternmultitailer handle the logs of every pod in the tests
diff --git a/tests/utils/sternmultitailer/multitailer.go b/tests/utils/sternmultitailer/multitailer.go
index 9218ae1949..ad2adf5998 100644
--- a/tests/utils/sternmultitailer/multitailer.go
+++ b/tests/utils/sternmultitailer/multitailer.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package sternmultitailer
@@ -159,7 +162,7 @@ func outputWriter(baseDir string, logReader io.Reader) {
 			continue
 		}
 
-		_, err = file.WriteString(fmt.Sprintf("%v\n", logLine.Message))
+		_, err = fmt.Fprintf(file, "%v\n", logLine.Message)
 		if err != nil {
 			fmt.Printf("could not write message to file %v: %v\n", file.Name(), err)
 			continue
diff --git a/tests/utils/storage.go b/tests/utils/storage/storage.go
similarity index 71%
rename from tests/utils/storage.go
rename to tests/utils/storage/storage.go
index 16883a5f05..afb47c8e28 100644
--- a/tests/utils/storage.go
+++ b/tests/utils/storage/storage.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,11 +13,15 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package utils
+// Package storage provides functions to manage anything related to storage
+package storage
 
 import (
+	"context"
 	"fmt"
 	"os"
 
@@ -27,18 +32,28 @@ import (
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
 )
 
 // GetStorageAllowExpansion returns the boolean value of the 'AllowVolumeExpansion' value of the storage class
-func GetStorageAllowExpansion(defaultStorageClass string, env *TestingEnvironment) (*bool, error) {
+func GetStorageAllowExpansion(
+	ctx context.Context,
+	crudClient client.Client,
+	defaultStorageClass string,
+) (*bool, error) {
 	storageClass := &storagev1.StorageClass{}
-	err := GetObject(env, client.ObjectKey{Name: defaultStorageClass}, storageClass)
+	err := objects.Get(ctx, crudClient, client.ObjectKey{Name: defaultStorageClass}, storageClass)
 	return storageClass.AllowVolumeExpansion, err
 }
 
 // IsWalStorageEnabled returns true if 'WalStorage' is being used
-func IsWalStorageEnabled(namespace, clusterName string, env *TestingEnvironment) (bool, error) {
-	cluster, err := env.GetCluster(namespace, clusterName)
+func IsWalStorageEnabled(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (bool, error) {
+	cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
 	if cluster.Spec.WalStorage == nil {
 		return false, err
 	}
@@ -136,3 +151,28 @@ func SetSnapshotNameAsEnv(
 	}
 	return nil
 }
+
+// GetPVCList gathers the current list of PVCs in a namespace
+func GetPVCList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*corev1.PersistentVolumeClaimList, error) {
+	pvcList := &corev1.PersistentVolumeClaimList{}
+	err := crudClient.List(
+		ctx, pvcList, client.InNamespace(namespace),
+	)
+	return pvcList, err
+}
+
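+// A minimal usage sketch for GetPVCList (illustrative only, not part of the
+// package API); it assumes a context.Context and a configured
+// controller-runtime client, as provided by the test suite:
+//
+//	pvcs, err := GetPVCList(ctx, crudClient, "default")
+//	if err == nil {
+//		fmt.Printf("found %d PVCs\n", len(pvcs.Items))
+//	}
+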
+// GetSnapshotList gathers the current list of VolumeSnapshots in a namespace +func GetSnapshotList( + ctx context.Context, + crudClient client.Client, + namespace string, +) (*volumesnapshot.VolumeSnapshotList, error) { + list := &volumesnapshot.VolumeSnapshotList{} + err := crudClient.List(ctx, list, client.InNamespace(namespace)) + + return list, err +} diff --git a/tests/utils/time.go b/tests/utils/time.go deleted file mode 100644 index c89738e91e..0000000000 --- a/tests/utils/time.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "strings" - - corev1 "k8s.io/api/core/v1" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// GetCurrentTimestamp getting current time stamp from postgres server -func GetCurrentTimestamp(namespace, clusterName string, env *TestingEnvironment, podName *corev1.Pod) (string, error) { - host, err := GetHostName(namespace, clusterName, env) - if err != nil { - return "", err - } - appUser, appUserPass, err := GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - if err != nil { - return "", err - } - query := "select TO_CHAR(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS.US');" - stdOut, _, err := RunQueryFromPod( - podName, - host, - AppDBName, - appUser, - appUserPass, - query, - env, - ) - if err != nil { - return "", err - } - currentTimestamp := strings.Trim(stdOut, "\n") - return currentTimestamp, nil -} diff --git a/tests/utils/timeouts.go b/tests/utils/timeouts/timeouts.go similarity index 93% rename from tests/utils/timeouts.go rename to tests/utils/timeouts/timeouts.go index 8edf62d1de..6eb1c2dc36 100644 --- a/tests/utils/timeouts.go +++ b/tests/utils/timeouts/timeouts.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +13,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ -package utils +// Package timeouts contains the timeouts for the E2E test suite +package timeouts import ( "encoding/json" diff --git a/tests/utils/utils.go b/tests/utils/utils.go new file mode 100644 index 0000000000..18ac87ada7 --- /dev/null +++ b/tests/utils/utils.go @@ -0,0 +1,173 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package utils + +import ( + "bytes" + "context" + "fmt" + "text/tabwriter" + + "github.com/cheynewallace/tabby" + batchv1 "k8s.io/api/batch/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + utils2 "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" +) + +// PrintClusterResources prints a summary of the cluster pods, jobs, pvcs etc. +func PrintClusterResources(ctx context.Context, crudClient client.Client, namespace, clusterName string) string { + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return fmt.Sprintf("Error while Getting Object %v", err) + } + + buffer := &bytes.Buffer{} + w := tabwriter.NewWriter(buffer, 0, 0, 4, ' ', 0) + clusterInfo := tabby.NewCustom(w) + clusterInfo.AddLine("Timeout while waiting for cluster ready, dumping more cluster information for analysis...") + clusterInfo.AddLine() + clusterInfo.AddLine() + clusterInfo.AddLine("Cluster information:") + clusterInfo.AddLine("Name", cluster.GetName()) + clusterInfo.AddLine("Namespace", cluster.GetNamespace()) + clusterInfo.AddLine() + clusterInfo.AddHeader("Items", "Values") + clusterInfo.AddLine("Spec.Instances", cluster.Spec.Instances) + clusterInfo.AddLine("Wal storage", cluster.ShouldCreateWalArchiveVolume()) + clusterInfo.AddLine("Cluster phase", cluster.Status.Phase) + clusterInfo.AddLine("Phase reason", cluster.Status.PhaseReason) + clusterInfo.AddLine("Cluster target primary", cluster.Status.TargetPrimary) + clusterInfo.AddLine("Cluster current primary", cluster.Status.CurrentPrimary) + clusterInfo.AddLine() + + podList, _ := clusterutils.ListPods(ctx, crudClient, cluster.GetNamespace(), cluster.GetName()) + + clusterInfo.AddLine("Cluster Pods information:") + clusterInfo.AddLine("Ready pod number: ", utils2.CountReadyPods(podList.Items)) + clusterInfo.AddLine() + clusterInfo.AddHeader("Items", "Values") + for _, pod := range podList.Items { + clusterInfo.AddLine("Pod name", pod.Name) + clusterInfo.AddLine("Pod phase", pod.Status.Phase) + if cluster.Status.InstancesReportedState != nil { + if instanceReportState, ok := cluster.Status.InstancesReportedState[v1.PodName(pod.Name)]; ok { + clusterInfo.AddLine("Is Primary", instanceReportState.IsPrimary) + clusterInfo.AddLine("TimeLineID", instanceReportState.TimeLineID) + clusterInfo.AddLine("---", "---") + } + } else { + clusterInfo.AddLine("InstanceReportState not reported", "") + } + } + + clusterInfo.AddLine("Jobs information:") + clusterInfo.AddLine() + clusterInfo.AddHeader("Items", "Values") + jobList := &batchv1.JobList{} + _ = crudClient.List( + ctx, jobList, client.InNamespace(namespace), + ) + for _, job := range jobList.Items { + clusterInfo.AddLine("Job name", job.Name) + clusterInfo.AddLine("Job status", fmt.Sprintf("%#v", job.Status)) + } + + pvcList, _ := storage.GetPVCList(ctx, crudClient, cluster.GetNamespace()) 
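+	// List every PVC in the namespace, so the report can be compared against
+	// the PVCCount recorded in the cluster status.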
+    clusterInfo.AddLine()
+    clusterInfo.AddLine("Cluster PVC information: (dumping all PVCs in the namespace)")
+    clusterInfo.AddLine("Available cluster PVC count", cluster.Status.PVCCount)
+    clusterInfo.AddLine()
+    clusterInfo.AddHeader("Items", "Values")
+    for _, pvc := range pvcList.Items {
+        clusterInfo.AddLine("PVC name", pvc.Name)
+        clusterInfo.AddLine("PVC phase", pvc.Status.Phase)
+        clusterInfo.AddLine("---", "---")
+    }
+
+    snapshotList, _ := storage.GetSnapshotList(ctx, crudClient, cluster.Namespace)
+    clusterInfo.AddLine()
+    clusterInfo.AddLine("Cluster Snapshot information: (dumping all snapshots in the namespace)")
+    clusterInfo.AddLine()
+    clusterInfo.AddHeader("Items", "Values")
+    for _, snapshot := range snapshotList.Items {
+        clusterInfo.AddLine("Snapshot name", snapshot.Name)
+        if snapshot.Status.ReadyToUse != nil {
+            clusterInfo.AddLine("Snapshot ready to use", *snapshot.Status.ReadyToUse)
+        } else {
+            clusterInfo.AddLine("Snapshot ready to use", "false")
+        }
+        clusterInfo.AddLine("---", "---")
+    }
+
+    // Do not remove: Print flushes the tabwriter, ensuring the buffer holds the complete output.
+    clusterInfo.Print()
+
+    return buffer.String()
+}
+
+// ForgeArchiveWalOnMinio forges a WAL archive file directly on MinIO by copying and renaming an existing WAL archive
+// file, instead of using `switchWalCmd` to generate a real one; this gives the test more control. To make sure the
+// forged file can never be mistaken for a real WAL archive, choose a sequence number for newWALName that is too high
+// to be produced by an idle PostgreSQL instance.
+func ForgeArchiveWalOnMinio(namespace, clusterName, minioClientPodName, existingWALName, newWALName string) error {
+    // Forge a WAL archive by copying and renaming an existing WAL archive
+    minioWALBasePath := "minio/" + clusterName + "/" + clusterName + "/wals/0000000100000000"
+    existingWALPath := minioWALBasePath + "/" + existingWALName + ".gz"
+    newWALNamePath := minioWALBasePath + "/" + newWALName
+    forgeWALOnMinioCmd := "mc cp " + existingWALPath + " " + newWALNamePath
+    _, _, err := run.UncheckedRetry(fmt.Sprintf(
+        "kubectl exec -n %v %v -- %v",
+        namespace,
+        minioClientPodName,
+        forgeWALOnMinioCmd))
+
+    return err
+}
+
+// TestFileExist tests whether a file named `fileName` exists under the directory `directoryPath` on pod `podName`
+// in namespace `namespace`
+func TestFileExist(namespace, podName, directoryPath, fileName string) bool {
+    filePath := directoryPath + "/" + fileName
+    testFileExistCommand := "test -f " + filePath
+    _, _, err := run.Unchecked(fmt.Sprintf(
+        "kubectl exec -n %v %v -- %v",
+        namespace,
+        podName,
+        testFileExistCommand))
+
+    return err == nil
+}
+
+// TestDirectoryEmpty tests whether the directory `directoryPath` on pod `podName` in namespace `namespace` is empty
+func TestDirectoryEmpty(namespace, podName, directoryPath string) bool {
+    testDirectoryEmptyCommand := "test -z \"$(ls -A " + directoryPath + ")\""
+    _, _, err := run.Unchecked(fmt.Sprintf(
+        "kubectl exec -n %v %v -- %v",
+        namespace,
+        podName,
+        testDirectoryEmptyCommand))
+
+    return err == nil
+}
diff --git a/tests/utils/version.go b/tests/utils/version.go
deleted file mode 100644
index 1c814faf8c..0000000000
--- a/tests/utils/version.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "fmt" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" -) - -// BumpPostgresImageMajorVersion returns a postgresImage incrementing the major version of the argument (if available) -func BumpPostgresImageMajorVersion(postgresImage string) (string, error) { - imageReference := utils.NewReference(postgresImage) - - postgresImageVersion, err := postgres.GetPostgresVersionFromTag(imageReference.Tag) - if err != nil { - return "", err - } - - targetPostgresImageVersionInt := postgresImageVersion + 1_00_00 - - defaultImageVersion, err := postgres.GetPostgresVersionFromTag(utils.GetImageTag(versions.DefaultImageName)) - if err != nil { - return "", err - } - - if targetPostgresImageVersionInt >= defaultImageVersion { - return postgresImage, nil - } - - imageReference.Tag = fmt.Sprintf("%d", postgresImageVersion/10000+1) - - return imageReference.GetNormalizedName(), nil -} diff --git a/tests/utils/webapp.go b/tests/utils/webapp.go deleted file mode 100644 index ec2cc1b4bb..0000000000 --- a/tests/utils/webapp.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" -) - -// DefaultWebapp returns a struct representing a -func DefaultWebapp(namespace string, name string, rootCASecretName string, tlsSecretName string) corev1.Pod { - var secretMode int32 = 0o600 - seccompProfile := &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - } - - return corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "secret-volume-root-ca", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: rootCASecretName, - DefaultMode: &secretMode, - }, - }, - }, - { - Name: "secret-volume-tls", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: tlsSecretName, - DefaultMode: &secretMode, - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: name, - Image: "ghcr.io/cloudnative-pg/webtest:1.6.0", - Ports: []corev1.ContainerPort{ - { - ContainerPort: 8080, - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "secret-volume-root-ca", - MountPath: "/etc/secrets/ca", - }, - { - Name: "secret-volume-tls", - MountPath: "/etc/secrets/tls", - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - SeccompProfile: seccompProfile, - }, - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - SeccompProfile: seccompProfile, - }, - }, - } -} diff --git a/tests/utils/webhooks.go b/tests/utils/webhooks.go deleted file mode 100644 index 210e541aa2..0000000000 --- a/tests/utils/webhooks.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "bytes" - "context" - "fmt" - - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller" -) - -// GetCNPGsMutatingWebhookByName get the MutatingWebhook filtered by the name of one -// of the webhooks -func GetCNPGsMutatingWebhookByName(env *TestingEnvironment, name string) ( - *admissionregistrationv1.MutatingWebhookConfiguration, int, error, -) { - var mWebhooks admissionregistrationv1.MutatingWebhookConfigurationList - err := GetObjectList(env, &mWebhooks) - if err != nil { - return nil, 0, err - } - - for i, item := range mWebhooks.Items { - for i2, webhook := range item.Webhooks { - if webhook.Name == name { - return &mWebhooks.Items[i], i2, nil - } - } - } - return nil, 0, fmt.Errorf("mutating webhook not found") -} - -// UpdateCNPGsMutatingWebhookConf update MutatingWebhookConfiguration object -func UpdateCNPGsMutatingWebhookConf(env *TestingEnvironment, - wh *admissionregistrationv1.MutatingWebhookConfiguration, -) error { - ctx := context.Background() - _, err := env.Interface.AdmissionregistrationV1(). - MutatingWebhookConfigurations().Update(ctx, wh, metav1.UpdateOptions{}) - if err != nil { - return err - } - return nil -} - -// GetCNPGsValidatingWebhookConf get the ValidatingWebhook linked to the operator -func GetCNPGsValidatingWebhookConf(env *TestingEnvironment) ( - *admissionregistrationv1.ValidatingWebhookConfiguration, error, -) { - ctx := context.Background() - validatingWebhookConfig, err := env.Interface.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get( - ctx, controller.ValidatingWebhookConfigurationName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return validatingWebhookConfig, nil -} - -// GetCNPGsValidatingWebhookByName get ValidatingWebhook by the name of one -// of the webhooks -func GetCNPGsValidatingWebhookByName(env *TestingEnvironment, name string) ( - *admissionregistrationv1.ValidatingWebhookConfiguration, int, error, -) { - var vWebhooks admissionregistrationv1.ValidatingWebhookConfigurationList - err := GetObjectList(env, &vWebhooks) - if err != nil { - return nil, 0, err - } - - for i, item := range vWebhooks.Items { - for i2, webhook := range item.Webhooks { - if webhook.Name == name { - return &vWebhooks.Items[i], i2, nil - } - } - } - return nil, 0, fmt.Errorf("validating webhook not found") -} - -// UpdateCNPGsValidatingWebhookConf update the ValidatingWebhook object -func UpdateCNPGsValidatingWebhookConf(env *TestingEnvironment, - wh *admissionregistrationv1.ValidatingWebhookConfiguration, -) error { - ctx := context.Background() - _, err := env.Interface.AdmissionregistrationV1(). - ValidatingWebhookConfigurations().Update(ctx, wh, metav1.UpdateOptions{}) - if err != nil { - return err - } - return nil -} - -// CheckWebhookReady ensures that the operator has finished the webhook setup. 
-func CheckWebhookReady(env *TestingEnvironment, namespace string) error { - // Check CA - secret := &corev1.Secret{} - secretNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: controller.WebhookSecretName, - } - err := GetObject(env, secretNamespacedName, secret) - if err != nil { - return err - } - - ca := secret.Data["tls.crt"] - - mutatingWebhookConfig, err := env.GetCNPGsMutatingWebhookConf() - if err != nil { - return err - } - - for _, webhook := range mutatingWebhookConfig.Webhooks { - if !bytes.Equal(webhook.ClientConfig.CABundle, ca) { - return fmt.Errorf("secret %+v not match with ca bundle in %v: %v is not equal to %v", - controller.MutatingWebhookConfigurationName, secret, string(ca), string(webhook.ClientConfig.CABundle)) - } - } - - validatingWebhookConfig, err := GetCNPGsValidatingWebhookConf(env) - if err != nil { - return err - } - - for _, webhook := range validatingWebhookConfig.Webhooks { - if !bytes.Equal(webhook.ClientConfig.CABundle, ca) { - return fmt.Errorf("secret not match with ca bundle in %v", - controller.ValidatingWebhookConfigurationName) - } - } - - return nil -} - -// GetCNPGsMutatingWebhookConf get the MutatingWebhook linked to the operator -func (env TestingEnvironment) GetCNPGsMutatingWebhookConf() ( - *admissionregistrationv1.MutatingWebhookConfiguration, error, -) { - ctx := context.Background() - return env.Interface.AdmissionregistrationV1(). - MutatingWebhookConfigurations(). - Get(ctx, controller.MutatingWebhookConfigurationName, metav1.GetOptions{}) -} diff --git a/tests/utils/yaml.go b/tests/utils/yaml/yaml.go similarity index 56% rename from tests/utils/yaml.go rename to tests/utils/yaml/yaml.go index d418643470..f5601a6322 100644 --- a/tests/utils/yaml.go +++ b/tests/utils/yaml/yaml.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +13,24 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
-package utils
+// Package yaml provides functions to handle YAML files
+package yaml
 
 import (
 	"bytes"
 	"fmt"
 	"log"
+	"os"
+	"path/filepath"
 
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes/scheme"
 
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -61,3 +71,33 @@ func ParseObjectsFromYAML(data []byte, namespace string) ([]client.Object, error
 	}
 	return objects, nil
 }
+
+// GetResourceNameFromYAML returns the name of a resource in a YAML file
+func GetResourceNameFromYAML(scheme *runtime.Scheme, path string) (string, error) {
+	namespacedName, err := getResourceNamespacedNameFromYAML(scheme, path)
+	if err != nil {
+		return "", err
+	}
+	return namespacedName.Name, nil
+}
+
+// getResourceNamespacedNameFromYAML returns the NamespacedName representing a resource in a YAML file
+func getResourceNamespacedNameFromYAML(
+	scheme *runtime.Scheme,
+	path string,
+) (types.NamespacedName, error) {
+	data, err := os.ReadFile(filepath.Clean(path))
+	if err != nil {
+		return types.NamespacedName{}, err
+	}
+	decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer()
+	obj, _, err := decoder.Decode(data, nil, nil)
+	if err != nil {
+		return types.NamespacedName{}, err
+	}
+	objectMeta, err := meta.Accessor(obj)
+	if err != nil {
+		return types.NamespacedName{}, err
+	}
+	return types.NamespacedName{Namespace: objectMeta.GetNamespace(), Name: objectMeta.GetName()}, nil
+}
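
Usage sketch for the new PrintClusterResources helper (not part of the patch): a plain polling loop that falls back to the tabwriter dump when the cluster never becomes ready. The waitForClusterReady name, poll interval, and timeout handling are illustrative assumptions; only PrintClusterResources, clusterutils.Get, and the controller-runtime client come from the code above.

package example

import (
	"context"
	"fmt"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
)

// waitForClusterReady is a hypothetical helper: it polls until every instance
// reports ready and, on timeout, embeds the PrintClusterResources dump in the
// returned error so CI logs show the full cluster state, not a bare timeout.
func waitForClusterReady(
	ctx context.Context,
	crudClient client.Client,
	namespace, clusterName string,
	timeout time.Duration,
) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
		if err == nil && cluster.Status.ReadyInstances == cluster.Spec.Instances {
			return nil
		}
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("cluster %s/%s not ready:\n%s", namespace, clusterName,
		utils.PrintClusterResources(ctx, crudClient, namespace, clusterName))
}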
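Usage sketch for ForgeArchiveWalOnMinio together with TestFileExist (not part of the patch): forge a segment whose sequence number an idle cluster cannot reach, then check whether a restore pulled it into pg_wal. The pod names, WAL segment names, and the pg_wal path are illustrative assumptions.

package example

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
)

// forgedWALIsRestored is a hypothetical test step built on the helpers above.
func forgedWALIsRestored(namespace, clusterName, primaryPodName string) (bool, error) {
	const (
		minioClientPodName = "mc"                       // assumed MinIO client pod name
		existingWAL        = "000000010000000000000002" // source segment to copy
		forgedWAL          = "000000010000000000000100.gz"
	)
	// Copy an archived segment to a name far beyond what an idle cluster can
	// produce, so the forged file can never collide with a real archive.
	if err := utils.ForgeArchiveWalOnMinio(
		namespace, clusterName, minioClientPodName, existingWAL, forgedWAL,
	); err != nil {
		return false, fmt.Errorf("forging WAL on MinIO: %w", err)
	}
	// TestFileExist runs `test -f` inside the pod, so nothing is copied out
	// of the container to perform the check.
	return utils.TestFileExist(
		namespace, primaryPodName,
		"/var/lib/postgresql/data/pgdata/pg_wal", "000000010000000000000100",
	), nil
}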
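Usage sketch for the relocated yaml package (not part of the patch): GetResourceNameFromYAML only decodes GVKs registered in the scheme it receives, so the CloudNativePG types must be added alongside the core ones or the decode fails with a "no kind registered" error. The fixture path is an illustrative assumption.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
)

// clusterNameFromFixture decodes the first object in a manifest and returns
// its metadata.name.
func clusterNameFromFixture(path string) (string, error) {
	sch := runtime.NewScheme()
	if err := scheme.AddToScheme(sch); err != nil { // core Kubernetes types
		return "", err
	}
	if err := apiv1.AddToScheme(sch); err != nil { // Cluster, Backup, Pooler, ...
		return "", err
	}
	name, err := yaml.GetResourceNameFromYAML(sch, path)
	if err != nil {
		return "", fmt.Errorf("decoding %q: %w", path, err)
	}
	return name, nil
}

For example, clusterNameFromFixture("fixtures/base/cluster-storage-class.yaml") would return the cluster name declared in that manifest.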